Dataset columns (one record per source file):

  code        stringlengths   5 to 1M
  repo_name   stringlengths   5 to 109
  path        stringlengths   6 to 208
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int64           5 to 1M
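Each record below pairs one flattened Scala source file with its repository metadata, in the column order listed above. As a rough sketch of that record layout (the case class below is illustrative only and is not part of the dataset; the field names follow the schema, while the Scala types are assumptions), a row could be modelled as:

// Hypothetical model of one record in this dump; the dataset itself does not
// ship this class. Field order and names follow the schema above.
final case class CodeRecord(
  code: String,     // full source text of the file
  repoName: String, // e.g. "jhungerford/MintYnabReconciler"
  path: String,     // file path inside the repository
  language: String, // always "Scala" in this dump
  license: String,  // one of 15 license identifiers, e.g. "apache-2.0"
  size: Long        // file size in bytes
)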
package dev.budget.reconciler

import dev.budget.reconciler.modules.{JsonModule, ApplicationModule, ElasticSearchModule}
import scaldi.Injectable

object Main {
  def main(args: Array[String]) {
    implicit val mainInjector = new ApplicationModule :: new ElasticSearchModule :: new JsonModule

    val application: ReconcilerApplication = Injectable.inject[ReconcilerApplication]
    application.run(args)
  }
}
jhungerford/MintYnabReconciler
src/main/scala/dev/budget/reconciler/Main.scala
Scala
apache-2.0
415
package io.ddf.flink.content

import io.ddf.DDF
import io.ddf.exception.DDFException
import io.ddf.flink.BaseSpec

import scala.collection.JavaConversions._

class ViewHandlerSpec extends BaseSpec {
  val airlineDDF = loadAirlineDDF()
  val yearNamesDDF = loadYearNamesDDF()

  it should "project after remove columns " in {
    val ddf = airlineDDF
    val ddf0 = flinkDDFManager.sql2ddf("select * from airline")
    val yearLabel = "Year"
    val depTimeLabel = "DepTime"
    val columns: java.util.List[String] = List(yearLabel, depTimeLabel, "Month")
    val newddf1: DDF = ddf.VIEWS.removeColumn(yearLabel)
    newddf1.getNumColumns should be(28)
    val newddf2: DDF = ddf.VIEWS.removeColumns(depTimeLabel)
    val newddf3: DDF = ddf0.VIEWS.removeColumns(columns)
    newddf2.getNumColumns should be(27)
    newddf3.getNumColumns should be(26)
  }

  it should "test sample" in {
    val ddf = loadMtCarsDDF()
    val sample = ddf.VIEWS.getRandomSample(10)
    sample.get(0)(0).asInstanceOf[Double] should not be (sample.get(1)(0).asInstanceOf[Double])
    sample.get(1)(0).asInstanceOf[Double] should not be (sample.get(2)(0).asInstanceOf[Double])
    sample.get(2)(0).asInstanceOf[Double] should not be (sample.get(3)(0).asInstanceOf[Double])
    sample.size should be(10)
  }

  it should "test sample with percentage" in {
    val sample = ddf.VIEWS.getRandomSample(0.5, false, 1)
    sample.VIEWS.head(3) should have size(3)
  }

  it should "throw an error when sample percentage is invalid" in {
    intercept[IllegalArgumentException] {
      ddf.VIEWS.getRandomSample(5.0, false, 1)
    }
  }

  it should "get top 3 rows" in {
    val sample = flinkDDFManager.sql2ddf("SELECT Month from airline")
    flinkDDFManager.setDDFName(sample, "sample")
    sample.VIEWS.head(3) should have size(3)
  }
}
milliondreams/ddf-with-flink
flink/src/test/scala/io/ddf/flink/content/ViewHandlerSpec.scala
Scala
apache-2.0
1,815
/*
 * Copyright 2017 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.accounts.frs102

import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import uk.gov.hmrc.ct.accounts.frs102.boxes._
import uk.gov.hmrc.ct.accounts.frs102.retriever.FullAccountsBoxRetriever

trait BoxesFixture extends MockitoSugar {

  implicit val boxRetriever = mock[FullAccountsBoxRetriever]

  def ac42withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac42()).thenReturn(AC42(Some(99)))

  def ac42noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac42()).thenReturn(AC42(None))

  def ac43noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac43()).thenReturn(AC43(None))

  def ac43withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac43()).thenReturn(AC43(Some(99)))

  def acq5021noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5021()).thenReturn(ACQ5021(None))

  def acq5021false(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5021()).thenReturn(ACQ5021(Some(false)))

  def acq5021withValue(implicit boxRetriever: FullAccountsBoxRetriever) = acq5021false

  def acq5022noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5022()).thenReturn(ACQ5022(None))

  def acq5022false(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5022()).thenReturn(ACQ5022(Some(false)))

  def acq5022withValue(implicit boxRetriever: FullAccountsBoxRetriever) = acq5022false

  def ac44withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac44()).thenReturn(AC44(Some(99)))

  def ac44noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac44()).thenReturn(AC44(None))

  def ac45noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac45()).thenReturn(AC45(None))

  def ac45withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.ac45()).thenReturn(AC45(Some(99)))

  def acq5031noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5031()).thenReturn(ACQ5031(None))

  def acq5031withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5031()).thenReturn(ACQ5031(Some(false)))

  def acq5032noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5032()).thenReturn(ACQ5032(None))

  def acq5032withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5032()).thenReturn(ACQ5032(Some(false)))

  def acq5033noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5033()).thenReturn(ACQ5033(None))

  def acq5033withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5033()).thenReturn(ACQ5033(Some(false)))

  def acq5034noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5034()).thenReturn(ACQ5034(None))

  def acq5034withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5034()).thenReturn(ACQ5034(Some(false)))

  def acq5035noValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5035()).thenReturn(ACQ5035(None))

  def acq5035withValue(implicit boxRetriever: FullAccountsBoxRetriever) =
    when(boxRetriever.acq5035()).thenReturn(ACQ5035(Some(false)))
}
liquidarmour/ct-calculations
src/test/scala/uk/gov/hmrc/ct/accounts/frs102/BoxesFixture.scala
Scala
apache-2.0
3,917
package breeze.data;

import java.net.URL;
import scala.io.Source;

/*
 Copyright 2009 David Hall, Daniel Ramage

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

/**
 * A DataMatrix stores a double-valued label along with the double-valued features that go with it.
 *
 * TODO: change to DenseVector
 */
trait DataMatrix {
  def rows: Seq[Example[Double,Seq[Double]]];
  def row(i: Int): Example[Double,Seq[Double]] = rows(i);
  def partition(f: Example[Double,Seq[Double]] => Boolean) = rows.partition(f);
}

object DataMatrix {
  /**
   * Downloads a DataMatrix from a URL. The DataMatrix format is a space-separated values file of doubles
   * with one column a label column.
   * @param url: where
   * @param labelColumn which column (starting at 0) is the label. May be negative, in which case it starts from the end.
   * @param separator: a regex for delimiters. Defaults to \\s+
   * @param dropRow: delete the first row
   */
  def fromURL(url: URL, labelColumn: Int = 0, separator: String = "\\s+", dropRow: Boolean = false): DataMatrix = {
    fromSource(Source.fromURL(url), labelColumn, separator, dropRow);
  }

  /**
   * Reads a DataMatrix from a Source. The DataMatrix format is a space-separated values file of doubles
   * with one column a label column.
   * @param src: where
   * @param labelColumn which column (starting at 0) is the label. May be negative, in which case it starts from the end.
   * @param separator: a regex for delimiters. Defaults to \\s+
   * @param dropRow: delete the first row
   */
  def fromSource(src: Source, labelColumn: Int = 0, separator: String = "\\s+", dropRow: Boolean = false): DataMatrix = {
    val rowsIterator = for {
      (line, i) <- src.getLines().zipWithIndex;
      if !dropRow || i != 0
      allCols = line.split(separator) map (_.toDouble);
      lbl = allCols(if (labelColumn < 0) allCols.length + labelColumn else labelColumn);
      dataCols = allCols.patch(labelColumn, Seq.empty, 1)
    } yield Example[Double,Seq[Double]](label = lbl, features = dataCols, id = i.toString);
    new DataMatrix {
      val rows = rowsIterator.toSeq;
    }
  }
}
tjhunter/scalanlp-core
process/src/main/scala/breeze/data/DataMatrix.scala
Scala
apache-2.0
2,591
/**
 * Copyright (c) 2007-2011 Eric Torreborre <etorreborre@yahoo.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
 * documentation files (the "Software"), to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies or substantial portions of
 * the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
 * products derived from this software without specific prior written permission.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
package org.specs.mock

import org.specs._

object returnValues extends returnValues
class returnValues extends HtmlSpecification("Return values") with MockitoSpecification {
<wiki>
h3. Return values

Optional ReturnValues can be used with mock[Class](ReturnValues).
"ReturnValues":http://mockito.googlecode.com/svn/branches/1.7/javadoc/org/mockito/ReturnValues.html defines the return values of unstubbed invocations.

This implementation can be helpful when working with legacy code. Unstubbed methods often return null. If your code uses
the object returned by an unstubbed call you get a NullPointerException.
This implementation of ReturnValues makes unstubbed methods return SmartNull instead of null.
SmartNull gives nicer exception message than NPE because it points out the line where unstubbed method was called.
You just click on the stack trace.

SmartNullReturnValues can be set on mocks with the smartMock method.
It first tries to return ordinary return values
(see "MoreEmptyReturnValues":http://mockito.googlecode.com/svn/branches/1.7/javadoc/org/mockito/internal/returnvalues/MoreEmptyReturnValues.html)
then it tries to return SmartNull. If the return type is final then plain null is returned.

{"""
  import org.specs._
  import org.specs.mock.Mockito""" prelude it shh }

For Example: {"""
  class s1 extends Specification with Mockito {
    val got = mock[org.specs.mock.Hello].get(0)
  } """ snip it }

<ex>The returned value should yield a NullPointerException</ex>:

{ "new s1().got.toString" add it }
{ >("NullPointerException")}

If @smartMock@ is used: {"""
  class s2 extends Specification with Mockito {
    val got = smartMock[org.specs.mock.Hello].get(0)
  } """ snip it }

<ex>Accessing the returned value will yield an empty string instead of @null@</ex>:

{ "new s2().got" add it }
{ >("")}

</wiki> isSus
}

trait Hello {
  def get(i: Int): String
}
stuhood/specs
src/test/scala/org/specs/mock/returnValues.scala
Scala
mit
3,219
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.command.table import scala.collection.JavaConverters._ import org.apache.spark.sql.{CarbonEnv, Row, SparkSession, _} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.execution.command.MetadataCommand import org.apache.spark.sql.parser.CarbonSparkSqlParserUtil import org.apache.spark.util.SparkUtil import org.apache.carbondata.common.logging.LogServiceFactory import org.apache.carbondata.core.datastore.impl.FileFactory import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier import org.apache.carbondata.core.metadata.schema.partition.PartitionType import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo} import org.apache.carbondata.core.util.ThreadLocalSessionInfo import org.apache.carbondata.events.{CreateTablePostExecutionEvent, CreateTablePreExecutionEvent, OperationContext, OperationListenerBus} import org.apache.carbondata.spark.util.CarbonSparkUtil case class CarbonCreateTableCommand( tableInfo: TableInfo, ifNotExistsSet: Boolean = false, tableLocation: Option[String] = None, isExternal : Boolean = false, createDSTable: Boolean = true, isVisible: Boolean = true) extends MetadataCommand { override def processMetadata(sparkSession: SparkSession): Seq[Row] = { val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName) val tableName = tableInfo.getFactTable.getTableName var databaseOpt : Option[String] = None ThreadLocalSessionInfo .setConfigurationToCurrentThread(sparkSession.sessionState.newHadoopConf()) if (tableInfo.getDatabaseName != null) { databaseOpt = Some(tableInfo.getDatabaseName) } val dbName = CarbonEnv.getDatabaseName(databaseOpt)(sparkSession) setAuditTable(dbName, tableName) setAuditInfo(tableInfo.getFactTable.getTableProperties.asScala.toMap ++ Map("external" -> isExternal.toString)) // set dbName and tableUnique Name in the table info tableInfo.setDatabaseName(dbName) tableInfo.setTableUniqueName(CarbonTable.buildUniqueName(dbName, tableName)) val isTransactionalTable = tableInfo.isTransactionalTable if (sparkSession.sessionState.catalog .tableExists(TableIdentifier(tableName, Some(dbName)))) { if (!ifNotExistsSet) { throw new TableAlreadyExistsException(dbName, tableName) } } else { val tablePath = CarbonEnv.createTablePath( Some(dbName), tableName, tableInfo.getFactTable.getTableId, tableLocation, isExternal, isTransactionalTable )(sparkSession) tableInfo.setTablePath(tablePath) CarbonSparkSqlParserUtil.validateTableProperties(tableInfo) val tableIdentifier = AbsoluteTableIdentifier .from(tablePath, dbName, tableName, tableInfo.getFactTable.getTableId) val operationContext = new OperationContext val 
createTablePreExecutionEvent: CreateTablePreExecutionEvent = CreateTablePreExecutionEvent(sparkSession, tableIdentifier, Some(tableInfo)) OperationListenerBus.getInstance.fireEvent(createTablePreExecutionEvent, operationContext) val catalog = CarbonEnv.getInstance(sparkSession).carbonMetaStore val carbonSchemaString = catalog.generateTableSchemaString(tableInfo, tableIdentifier) if (createDSTable) { try { val tablePath = tableIdentifier.getTablePath val carbonRelation = CarbonSparkUtil.createCarbonRelation(tableInfo, tablePath) val rawSchema = CarbonSparkUtil.getRawSchema(carbonRelation) SparkUtil.setNullExecutionId(sparkSession) val partitionInfo = tableInfo.getFactTable.getPartitionInfo val partitionString = if (partitionInfo != null && partitionInfo.getPartitionType == PartitionType.NATIVE_HIVE) { s" PARTITIONED BY (${partitionInfo.getColumnSchemaList.asScala.map( _.getColumnName.toLowerCase).mkString(",")})" } else { "" } // add carbon properties into option list in addition to carbon default properties val repeatedPropKeys = Seq("tablename", "dbname", "tablePath", "isExternal", "path", "isTransactional", "isVisible", "carbonSchemaPartsNo") val tableProperties = tableInfo .getFactTable .getTableProperties .asScala .filter(prop => !repeatedPropKeys.exists(_.equalsIgnoreCase(prop._1))) .map { property => s""" ${ property._1 } "${ property._2 }",""" } .mkString("\\n", "\\n", "") // synchronized to prevent concurrently creation of table with same name CarbonCreateTableCommand.synchronized { // isVisible property is added to hive table properties to differentiate between main // table and mv. It is false only for mv's. This is added // to improve the show tables performance when filtering the MV from main tables sparkSession.sql( s"""CREATE TABLE $dbName.$tableName |(${ rawSchema }) |USING carbondata |OPTIONS (${tableProperties} | tableName "$tableName", | dbName "$dbName", | tablePath "$tablePath", | path "${FileFactory.addSchemeIfNotExists(tablePath)}", | isExternal "$isExternal", | isTransactional "$isTransactionalTable", | isVisible "$isVisible" | $carbonSchemaString) | $partitionString """.stripMargin).collect() } } catch { case e: AnalysisException => // AnalysisException thrown with table already exists message in case of // concurrent drivers if (e.getMessage().contains("already exists")) { // Clear the cache first CarbonEnv.getInstance(sparkSession).carbonMetaStore .removeTableFromMetadata(dbName, tableName) // Delete the folders created by this call if the actual path is different val actualPath = CarbonEnv .getCarbonTable(TableIdentifier(tableName, Option(dbName)))(sparkSession) .getTablePath if (!actualPath.equalsIgnoreCase(tablePath)) { LOGGER .error( "TableAlreadyExists with path : " + actualPath + " So, deleting " + tablePath) FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(tablePath)) } // No need to throw for create if not exists if (ifNotExistsSet) { LOGGER.error(e, e) } else { LOGGER.error(e) throw e } } else { LOGGER.error(e) throw e } case e: Exception => // call the drop table to delete the created table. try { CarbonEnv.getInstance(sparkSession).carbonMetaStore .dropTable(tableIdentifier)(sparkSession) } catch { case _: Exception => // No operation } throw e } } val createTablePostExecutionEvent: CreateTablePostExecutionEvent = CreateTablePostExecutionEvent(sparkSession, tableIdentifier) OperationListenerBus.getInstance.fireEvent(createTablePostExecutionEvent, operationContext) } Seq.empty } override protected def opName: String = "CREATE TABLE" }
zzcclp/carbondata
integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
Scala
apache-2.0
8,721
package mq

import akka.actor._
import akka.cluster.{MemberStatus, Cluster}
import akka.contrib.pattern.ClusterSingletonProxy
import com.typesafe.config.ConfigFactory
import mq.ClusterProtocol._
import mq.util.{RemoteAddressExtension, HashRingNode, HashRing}

import scala.util.Try
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

/**
 * Created by bruce on 25/02/15.
 */
class BrokerMaster extends Actor with ActorLogging {

  val cluster = Cluster(context.system)

  import scala.concurrent.ExecutionContext.Implicits.global

  implicit val timeout = Timeout(5 seconds)

  // http://doc.akka.io/docs/akka/2.3.1/scala/cluster-usage.html#Cluster_Aware_Routers
  val mqMaster = context.actorOf(ClusterSingletonProxy.props(singletonPath = "/user/clusterSingleton/mqMaster",
    role = Some("seed")), name = "mqMasterProxy")

  // node master
  // 1. ask self nodeId -> feedback
  // 2. ask self consumers to run -> feedback
  // 3. ask self consumers to run -> feedback
  // 4. be told consumers to run -> feedback

  // cluster master
  // 1. node list
  // 2. consumer list
  // 3. tell node master to run consumer
  // 4. add new consumer
  // 5. stop old consumer
  // 6. rebalancing
  // 7. stop a node or mark a node standby
  // 8. stop a consumer or mark a consumer pause
  // 9. consistent hash assign jobs
  // 10. consumers/ node management as a queue
  // 11. when cluster master down, will receive cluster from all nodes then recovery old status/ or save cluster status to cassandra and reload
  // 12. cluster status: nodeId -> consumerId list
  // 13. log cluster status changes to cassandra

  // consumer
  // 1. status: pause/run
  // 2. nodeId

  // node
  // 1. status: nodeId
  // 2. capicity
  // 3. consumers Id

  def getConsumerNodeRing(cluster: Cluster): HashRing = {
    var nodeList : List[HashRingNode] = List()
    cluster.state.members.filter(_.status == MemberStatus.Up).map { (m) =>
      if (m.roles.contains("consumer")) {
        nodeList = HashRingNode(m.address.toString, 50) :: nodeList
      }
    }
    new HashRing(nodeList)
  }

  // start actor on remote node
  // val one = AddressFromURIString("akka.tcp://sys@host:1234")
  // val ref = system.actorOf(Props[SampleActor].
  //   withDeploy(Deploy(scope = RemoteScope(address))))

  def receive: Receive = {
    case consumer: Consumer =>
      val workerId = "consumer-" + consumer.consumerId + "-" + consumer.shardId
      log.info("Bring up consumer: " + workerId)

      // create new consumer
      // consumerType: "mq.consumer.ConsolePrint"
      val worker = context.actorOf(Props(Class.forName(consumer.consumertype).asInstanceOf[Class[BrokerActor]]), workerId)
      context.watch(worker)
      worker ! consumer
      // update db to mark consumer status

    case Start(consumer) =>
      val address = RemoteAddressExtension(context.system).address
      log.debug("Self remote address: {}" + address)
      val nodeRing = getConsumerNodeRing(cluster)
      val nid = nodeRing.get(consumer.consumerId + consumer.shardId)
      if (nid.getOrElse("") == address.toString) {
        log.info("Begin to start consumer: {}", consumer)
        self ! consumer
      }

    case Stop(consumer) =>
      val workerId = "consumer-" + consumer.consumerId + "-" + consumer.shardId
      context.children.foreach { c =>
        if (c.path.name.equals(workerId)) {
          log.info("Begin to stop " + c.path.name + " " + c.path)
          c ! PoisonPill
        }
      }

//    // will not be used
//    case "start" =>
//      println("recalculate the cluster:")
//
//      val address = RemoteAddressExtension(context.system).address
//
//      println("self address: " + address)
//
//      var nodeList : List[HashRingNode] = List()
//      cluster.state.members.filter(_.status == MemberStatus.Up ).map{ (m) =>
//        if(m.roles.contains("consumer")) {
//          nodeList = HashRingNode(m.address.toString, 50) :: nodeList
//        }
//      }
//
//      val nodeRing = new HashRing(nodeList)
//
//      println("node list:" + nodeList)
//
//      val consumers = Consumers.getAllConsumers()
//      for(consumer <- consumers) {
//        consumer.foreach{(c) =>
//          // only start consumer marked to run at this node
//          val nid = nodeRing.get(c.consumerId + c.shardId)
//          //println(c + " -> " + nid.getOrElse(""))
//
//          if(nid.getOrElse("") == address.toString) {
//            println("will start: " + c)
//            self ! c
//          }
//
//        }
//      }

    case Info =>
      val members = cluster.state.members.filter(_.status == MemberStatus.Up)
      log.debug("cluster info: " + members)

      mqMaster ? "hi" onSuccess {
        case resp => println(resp)
      }

      log.debug("Node children: " + context.children.toString())

    case StopAll =>
      context.children.foreach { c =>
        if (c.path.name.startsWith("consumer")) {
          log.info("Begin to stop: " + c.path.name + " " + c.path)
          c ! PoisonPill
        }
      }

    case _ =>
  }
}
ElasticQueue/ElasticQueue
src/main/scala/mq/BrokerMaster.scala
Scala
mit
5,072
package io.fintrospect.util

import com.twitter.finagle.http.{Message, Response, Status}

object HttpRequestResponseUtil {
  def contentFrom(msg: Message): String = msg.contentString

  def statusAndContentFrom(msg: Response): (Status, String) = (msg.status, msg.contentString)

  def headersFrom(msg: Message): Map[String, String] =
    Map(msg.headerMap.map(entry => entry._1 -> entry._2).toSeq: _*)

  def headerOf(name: String)(msg: Message) = msg.headerMap.getAll(name).mkString(", ")
}
daviddenton/fintrospect
core/src/main/scala/io/fintrospect/util/HttpRequestResponseUtil.scala
Scala
apache-2.0
488
class Foo[A] {
  def map(f: (A) => Int)(i: Int)(j: Int): A = null.asInstanceOf[A]

  def fooz(i: Int)(s: String) = 42
  def fooz(i: Int)(j: Int) = 43

  def puk[A, B](a: A)(b: B) = null

  def gul[A](a: A): A = null.asInstanceOf[A]
  def gul(i: Int): Int = i

  def pal(i: Int)(s: String) = null
  def pal(i: String)(j: Int) = null
}

object Main {
  def main(args: Array[String]) {
    val foo: Foo[String] = new Foo[String];
//    println(foo.gul[Int](13))
    val d = foo.pal(42)
    println(d)
//    foo.map(s => s)(32)(32)
//    foo.puk[Int, Int](42)
    foo.<ref>gul(42)
//    4.asInstanceOf[Foo]
//    foo.fooz(239)(45)
//    foo.fooz(42)[Int](53)
  }
}
ilinum/intellij-scala
testdata/resolve/functions/typeParam2/tp2.scala
Scala
apache-2.0
679
package com.crackcell.jiezi.dict.loader.io

import java.io.InputStream

/**
 * InputStream
 *
 * @author Menglong TAN
 */
class StreamToStream extends ToStream[InputStream] {

  override def toStream(source: InputStream) = source

}
crackcell/jiezi
core/src/main/scala/com/crackcell/jiezi/dict/loader/io/StreamToStream.scala
Scala
apache-2.0
235
/*
 * Copyright (c) 2014-2020 by The Monix Project Developers.
 * See the project homepage at: https://monix.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package monix.benchmarks

import java.util.concurrent.TimeUnit
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import cats.effect.IO
import cats.implicits._
import monix.eval.Task
import org.openjdk.jmh.annotations._
import cats.effect.implicits._
import zio.ZIO
import scala.concurrent.Future

/** To do comparative benchmarks between versions:
  *
  *     benchmarks/run-benchmark TaskSequenceBenchmark
  *
  * This will generate results in `benchmarks/results`.
  *
  * Or to run the benchmark from within SBT:
  *
  *     jmh:run monix.benchmarks.TaskSequenceBenchmark
  *
  * The above test will take default values as "10 iterations", "10 warm-up iterations",
  * "2 forks", "1 thread".
  *
  * Or to specify custom values use below format:
  *
  *     jmh:run -i 20 -wi 20 -f 4 -t 2 monix.benchmarks.TaskSequenceBenchmark
  *
  * Which means "20 iterations", "20 warm-up iterations", "4 forks", "2 thread".
  * Please note that benchmarks should be usually executed at least in
  * 10 iterations (as a rule of thumb), but more is better.
  */
@Measurement(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
@Warmup(iterations = 10, time = 3, timeUnit = TimeUnit.SECONDS)
@Fork(2)
@Threads(1)
@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class TaskSequenceBenchmark {

  @Param(Array("100", "1000"))
  var count: Int = _

  val parallelism: Int = 10

  @Benchmark
  def catsSequence(): Long = {
    val tasks = (0 until count).map(_ => IO(1)).toList
    val result = tasks.sequence.map(_.sum.toLong)
    result.unsafeRunSync()
  }

  @Benchmark
  def catsParSequence(): Long = {
    val tasks = (0 until count).map(_ => IO(1)).toList
    val result = tasks.parSequence.map(_.sum.toLong)
    result.unsafeRunSync()
  }

  @Benchmark
  def catsParSequenceN(): Long = {
    val tasks = (0 until count).map(_ => IO(1)).toList
    val result = tasks.parSequenceN(parallelism.toLong).map(_.sum.toLong)
    result.unsafeRunSync()
  }

  @Benchmark
  def monixSequence(): Long = {
    val tasks = (0 until count).map(_ => Task.eval(1)).toList
    val result = Task.sequence(tasks).map(_.sum.toLong)
    result.runSyncUnsafe()
  }

  @Benchmark
  def monixParSequence(): Long = {
    val tasks = (0 until count).map(_ => Task.eval(1)).toList
    val result = Task.parSequence(tasks).map(_.sum.toLong)
    result.runSyncUnsafe()
  }

  @Benchmark
  def monixParSequenceUnordered(): Long = {
    val tasks = (0 until count).map(_ => Task.eval(1)).toList
    val result = Task.parSequenceUnordered(tasks).map(_.sum.toLong)
    result.runSyncUnsafe()
  }

  @Benchmark
  def monixParSequenceN(): Long = {
    val tasks = (0 until count).map(_ => Task.eval(1)).toList
    val result = Task.parSequenceN(parallelism)(tasks).map(_.sum.toLong)
    result.runSyncUnsafe()
  }

  @Benchmark
  def zioSequence(): Long = {
    val tasks = (0 until count).map(_ => ZIO.effectTotal(1)).toList
    val result = ZIO.collectAll(tasks).map(_.sum.toLong)
    zioUntracedRuntime.unsafeRun(result)
  }

  @Benchmark
  def zioParSequence(): Long = {
    val tasks = (0 until count).map(_ => ZIO.effectTotal(1)).toList
    val result = ZIO.collectAllPar(tasks).map(_.sum.toLong)
    zioUntracedRuntime.unsafeRun(result)
  }

  @Benchmark
  def zioParSequenceN(): Long = {
    val tasks = (0 until count).map(_ => ZIO.effectTotal(1)).toList
    val result = ZIO.collectAllParN(parallelism)(tasks).map(_.sum.toLong)
    zioUntracedRuntime.unsafeRun(result)
  }

  @Benchmark
  def futureSequence(): Long = {
    val futures = (0 until count).map(_ => Future(1)).toList
    val f: Future[Long] = Future.sequence(futures).map(_.sum.toLong)
    Await.result(f, Duration.Inf)
  }
}
alexandru/monifu
benchmarks/shared/src/main/scala/monix/benchmarks/TaskSequenceBenchmark.scala
Scala
apache-2.0
4,415
package xyz.hyperreal.spraytemplate

import akka.actor.Actor
import spray.routing._
import spray.http._
import spray.json._
import DefaultJsonProtocol._
import spray.httpx.SprayJsonSupport
import MediaTypes._

import scala.collection.mutable.ArrayBuffer


// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class MyServiceActor extends Actor with MyService {

  // the HttpService trait defines only one abstract member, which
  // connects the services environment to the enclosing actor or test
  def actorRefFactory = context

  // this actor only runs our route, but you could add
  // other things here, like request stream processing
  // or timeout handling
  def receive = runRoute(myRoute)
}


// this trait defines our service behavior independently from the service actor
trait MyService extends HttpService {

//  case class Todo( id: Int, text: String, done: Boolean )
//
//  object TodoJsonSupport extends DefaultJsonProtocol with SprayJsonSupport {
//    implicit val PortofolioFormats: RootJsonFormat[Todo] = jsonFormat3(Todo)
//  }
//
//  import TodoJsonSupport._
//
//  var todos = ArrayBuffer[Todo]( Todo(1, "first item", false), Todo(2, "second item", false) )
//  var id = 3

  val myRoute =
    pathPrefix("css") {
      getFromResourceDirectory("resources/public")
    } ~
    pathPrefix("js") {
      getFromResourceDirectory("public")
    } ~
    pathSuffixTest( ".*html"r ) { _ =>
      getFromResourceDirectory( "public" )
    } ~
    pathPrefix("coffee") {
      getFromResourceDirectory("public/js")
    } ~
    pathPrefix("webjars") {
      getFromResourceDirectory("META-INF/resources/webjars")
    } ~
    path( "" ) {
      getFromResource( "public/index.html" )
    }
}
edadma/angularjs-sass-coffeescript-spray-template
src/main/scala/MyService.scala
Scala
mit
1,763
/**
 * Copyright 2013 Gianluca Amato
 *
 * This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
 * JANDOM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
 */

package it.unich.jandom.targets.jvmasm

import org.objectweb.asm.tree._

/**
 * This exception is generated by the ASM analyzer when an unknown opcode is
 * encountered.
 * @param node the ASM instruction which has generated the fault
 * @author Gianluca Amato <gamato@unich.it>
 *
 */
case class UnsupportedASMInsnException(val node: AbstractInsnNode) extends Exception("Unsupported ASM instruction exception " + node)
amato-gianluca/Jandom
core/src/main/scala/it/unich/jandom/targets/jvmasm/UnsupportedASMInsnException.scala
Scala
lgpl-3.0
1,166
object Script {
  val main = composelistT(
    tryruleT(seq)*,
    substT*,
    nonarithcloseT
  )
}
keymaerad/KeYmaeraD
examples/medrobot/nonpr_ex2.dl.scala
Scala
bsd-3-clause
103
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.streaming import scala.collection.JavaConverters._ import scala.language.implicitConversions import org.scalatest.BeforeAndAfter import org.apache.spark.sql._ import org.apache.spark.sql.sources.v2.DataSourceOptions import org.apache.spark.sql.streaming.{OutputMode, StreamTest} import org.apache.spark.sql.types.{IntegerType, StructField, StructType} import org.apache.spark.util.Utils class MemorySinkSuite extends StreamTest with BeforeAndAfter { import testImplicits._ after { sqlContext.streams.active.foreach(_.stop()) } test("directly add data in Append output mode") { implicit val schema = new StructType().add(new StructField("value", IntegerType)) val sink = new MemorySink(schema, OutputMode.Append, DataSourceOptions.empty()) // Before adding data, check output assert(sink.latestBatchId === None) checkAnswer(sink.latestBatchData, Seq.empty) checkAnswer(sink.allData, Seq.empty) // Add batch 0 and check outputs sink.addBatch(0, 1 to 3) assert(sink.latestBatchId === Some(0)) checkAnswer(sink.latestBatchData, 1 to 3) checkAnswer(sink.allData, 1 to 3) // Add batch 1 and check outputs sink.addBatch(1, 4 to 6) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 6) checkAnswer(sink.allData, 1 to 6) // new data should get appended to old data // Re-add batch 1 with different data, should not be added and outputs should not be changed sink.addBatch(1, 7 to 9) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 6) checkAnswer(sink.allData, 1 to 6) // Add batch 2 and check outputs sink.addBatch(2, 7 to 9) assert(sink.latestBatchId === Some(2)) checkAnswer(sink.latestBatchData, 7 to 9) checkAnswer(sink.allData, 1 to 9) } test("directly add data in Append output mode with row limit") { implicit val schema = new StructType().add(new StructField("value", IntegerType)) var optionsMap = new scala.collection.mutable.HashMap[String, String] optionsMap.put(MemorySinkBase.MAX_MEMORY_SINK_ROWS, 5.toString()) var options = new DataSourceOptions(optionsMap.toMap.asJava) val sink = new MemorySink(schema, OutputMode.Append, options) // Before adding data, check output assert(sink.latestBatchId === None) checkAnswer(sink.latestBatchData, Seq.empty) checkAnswer(sink.allData, Seq.empty) // Add batch 0 and check outputs sink.addBatch(0, 1 to 3) assert(sink.latestBatchId === Some(0)) checkAnswer(sink.latestBatchData, 1 to 3) checkAnswer(sink.allData, 1 to 3) // Add batch 1 and check outputs sink.addBatch(1, 4 to 6) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 5) checkAnswer(sink.allData, 1 to 5) // new data should not go over the limit } test("directly add data in Update output mode") { implicit val schema = new 
StructType().add(new StructField("value", IntegerType)) val sink = new MemorySink(schema, OutputMode.Update, DataSourceOptions.empty()) // Before adding data, check output assert(sink.latestBatchId === None) checkAnswer(sink.latestBatchData, Seq.empty) checkAnswer(sink.allData, Seq.empty) // Add batch 0 and check outputs sink.addBatch(0, 1 to 3) assert(sink.latestBatchId === Some(0)) checkAnswer(sink.latestBatchData, 1 to 3) checkAnswer(sink.allData, 1 to 3) // Add batch 1 and check outputs sink.addBatch(1, 4 to 6) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 6) checkAnswer(sink.allData, 1 to 6) // new data should get appended to old data // Re-add batch 1 with different data, should not be added and outputs should not be changed sink.addBatch(1, 7 to 9) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 6) checkAnswer(sink.allData, 1 to 6) // Add batch 2 and check outputs sink.addBatch(2, 7 to 9) assert(sink.latestBatchId === Some(2)) checkAnswer(sink.latestBatchData, 7 to 9) checkAnswer(sink.allData, 1 to 9) } test("directly add data in Complete output mode") { implicit val schema = new StructType().add(new StructField("value", IntegerType)) val sink = new MemorySink(schema, OutputMode.Complete, DataSourceOptions.empty()) // Before adding data, check output assert(sink.latestBatchId === None) checkAnswer(sink.latestBatchData, Seq.empty) checkAnswer(sink.allData, Seq.empty) // Add batch 0 and check outputs sink.addBatch(0, 1 to 3) assert(sink.latestBatchId === Some(0)) checkAnswer(sink.latestBatchData, 1 to 3) checkAnswer(sink.allData, 1 to 3) // Add batch 1 and check outputs sink.addBatch(1, 4 to 6) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 6) checkAnswer(sink.allData, 4 to 6) // new data should replace old data // Re-add batch 1 with different data, should not be added and outputs should not be changed sink.addBatch(1, 7 to 9) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 6) checkAnswer(sink.allData, 4 to 6) // Add batch 2 and check outputs sink.addBatch(2, 7 to 9) assert(sink.latestBatchId === Some(2)) checkAnswer(sink.latestBatchData, 7 to 9) checkAnswer(sink.allData, 7 to 9) } test("directly add data in Complete output mode with row limit") { implicit val schema = new StructType().add(new StructField("value", IntegerType)) var optionsMap = new scala.collection.mutable.HashMap[String, String] optionsMap.put(MemorySinkBase.MAX_MEMORY_SINK_ROWS, 5.toString()) var options = new DataSourceOptions(optionsMap.toMap.asJava) val sink = new MemorySink(schema, OutputMode.Complete, options) // Before adding data, check output assert(sink.latestBatchId === None) checkAnswer(sink.latestBatchData, Seq.empty) checkAnswer(sink.allData, Seq.empty) // Add batch 0 and check outputs sink.addBatch(0, 1 to 3) assert(sink.latestBatchId === Some(0)) checkAnswer(sink.latestBatchData, 1 to 3) checkAnswer(sink.allData, 1 to 3) // Add batch 1 and check outputs sink.addBatch(1, 4 to 10) assert(sink.latestBatchId === Some(1)) checkAnswer(sink.latestBatchData, 4 to 8) checkAnswer(sink.allData, 4 to 8) // new data should replace old data } test("registering as a table in Append output mode") { val input = MemoryStream[Int] val query = input.toDF().writeStream .format("memory") .outputMode("append") .queryName("memStream") .start() input.addData(1, 2, 3) query.processAllAvailable() checkDataset( spark.table("memStream").as[Int], 1, 2, 3) input.addData(4, 5, 6) query.processAllAvailable() 
checkDataset( spark.table("memStream").as[Int], 1, 2, 3, 4, 5, 6) query.stop() } test("registering as a table in Complete output mode") { val input = MemoryStream[Int] val query = input.toDF() .groupBy("value") .count() .writeStream .format("memory") .outputMode("complete") .queryName("memStream") .start() input.addData(1, 2, 3) query.processAllAvailable() checkDatasetUnorderly( spark.table("memStream").as[(Int, Long)], (1, 1L), (2, 1L), (3, 1L)) input.addData(4, 5, 6) query.processAllAvailable() checkDatasetUnorderly( spark.table("memStream").as[(Int, Long)], (1, 1L), (2, 1L), (3, 1L), (4, 1L), (5, 1L), (6, 1L)) query.stop() } test("registering as a table in Update output mode") { val input = MemoryStream[Int] val query = input.toDF().writeStream .format("memory") .outputMode("update") .queryName("memStream") .start() input.addData(1, 2, 3) query.processAllAvailable() checkDataset( spark.table("memStream").as[Int], 1, 2, 3) input.addData(4, 5, 6) query.processAllAvailable() checkDataset( spark.table("memStream").as[Int], 1, 2, 3, 4, 5, 6) query.stop() } test("MemoryPlan statistics") { implicit val schema = new StructType().add(new StructField("value", IntegerType)) val sink = new MemorySink(schema, OutputMode.Append, DataSourceOptions.empty()) val plan = new MemoryPlan(sink) // Before adding data, check output checkAnswer(sink.allData, Seq.empty) assert(plan.stats.sizeInBytes === 0) sink.addBatch(0, 1 to 3) plan.invalidateStatsCache() assert(plan.stats.sizeInBytes === 36) sink.addBatch(1, 4 to 6) plan.invalidateStatsCache() assert(plan.stats.sizeInBytes === 72) } ignore("stress test") { // Ignore the stress test as it takes several minutes to run (0 until 1000).foreach { _ => val input = MemoryStream[Int] val query = input.toDF().writeStream .format("memory") .queryName("memStream") .start() input.addData(1, 2, 3) query.processAllAvailable() checkDataset( spark.table("memStream").as[Int], 1, 2, 3) input.addData(4, 5, 6) query.processAllAvailable() checkDataset( spark.table("memStream").as[Int], 1, 2, 3, 4, 5, 6) query.stop() } } test("error when no name is specified") { val error = intercept[AnalysisException] { val input = MemoryStream[Int] val query = input.toDF().writeStream .format("memory") .start() } assert(error.message contains "queryName must be specified") } test("error if attempting to resume specific checkpoint") { val location = Utils.createTempDir(namePrefix = "steaming.checkpoint").getCanonicalPath val input = MemoryStream[Int] val query = input.toDF().writeStream .format("memory") .queryName("memStream") .option("checkpointLocation", location) .start() input.addData(1, 2, 3) query.processAllAvailable() query.stop() intercept[AnalysisException] { input.toDF().writeStream .format("memory") .queryName("memStream") .option("checkpointLocation", location) .start() } } private def checkAnswer(rows: Seq[Row], expected: Seq[Int])(implicit schema: StructType): Unit = { checkAnswer( sqlContext.createDataFrame(sparkContext.makeRDD(rows), schema), intsToDF(expected)(schema)) } private implicit def intsToDF(seq: Seq[Int])(implicit schema: StructType): DataFrame = { require(schema.fields.size === 1) sqlContext.createDataset(seq).toDF(schema.fieldNames.head) } }
lxsmnv/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/MemorySinkSuite.scala
Scala
apache-2.0
11,549
/*
 * Copyright 2008 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */
package org.wso2.as
package model

import java.util.Date
import javax.persistence._

import org.hibernate.annotations.Type

/** This class represents a book that we might want to read. */
@Entity
class Book {
  @Id
  @GeneratedValue(strategy = GenerationType.AUTO)
  var id : Long = _

  @Column(unique = true, nullable = false)
  var title : String = ""

  @Temporal(TemporalType.DATE)
  @Column(nullable = true)
  var published : Date = new Date()

  @Type(`type` = "org.wso2.as.model.GenreType")
  var genre : Genre.Value = Genre.unknown

  @ManyToOne(optional = false)
  var author : Author = _
}
wso2as-developer/scala-samples
lift-jpa/spa/src/main/scala/org/wso2/as/model/Book.scala
Scala
apache-2.0
1,213
/* Copyright (c) 2016 Lucas Satabin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package lingua
package lexikon
package phases

import parser._
import untyped._

import fastparse.core.{ Parsed, ParseError }

class Parser(inputs: Map[String, String]) extends Phase[CompileOptions, Seq[DikoUnit]](Some("parser")) {

  def process(options: CompileOptions, reporter: Reporter): Seq[DikoUnit] =
    (for {
      (name, input) <- inputs
      unit <- DikoParser.unit(name).parse(input) match {
        case Parsed.Success(unit, _) =>
          Some(unit)
        case failure @ Parsed.Failure(_, offset, extra) =>
          reporter.error(ParseError(failure).getMessage, name, offset)
          None
      }
    } yield unit).toSeq

}
satabin/lingua
lexikon/src/main/scala/lingua/lexikon/phases/Parser.scala
Scala
apache-2.0
1,248
package memnets.ml

import breeze.stats.distributions.Uniform

import scala.math.round

object DataGens {
  val uniform = new Uniform(0.0, 1.0)

  def cont2Features(feats: Int, size: Int, prob: Double = 0.95): Data =
    contFunc(feats, size, prob) { pt =>
      pt(feats / 4) = 1.0
      pt(feats / 2) = 1.0
    }

  def cont3Features(feats: Int, size: Int, prob: Double = 0.95): Data =
    contFunc(feats, size, prob) { pt =>
      pt(feats / 4) = 1.0
      pt(feats / 2) = 1.0
      pt(feats - 1) = 1.0
    }

  def contFunc(feats: Int, size: Int, prob: Double = 0.95)(f: Array[Double] => Unit): Data = {
    val features = Array.tabulate[Feature](feats + 1) { i =>
      if (i == 0) Output(i, "cat", "Y", "N") else ContFeature(i, "f" + i, 0.0, 1.0)
    }
    val rawData = Array.tabulate(size) { i =>
      val pt = Array.tabulate(feats + 1) { j => uniform.draw() }
      val cat = round(pt(0))
      pt(0) = cat // Pos(+) example is 0.0, Neg(-) is 1.0
      if (cat < 1.0 && pt(feats / 4) <= prob) f(pt)
      pt
    }
    val data = Data(features, rawData)
    data.randomize()
    data
  }

  def binary2Features(feats: Int, size: Int, prob: Double = 0.95) =
    binaryXFeatures(feats, size, prob) { pt =>
      pt(feats / 4) = 1.0
      pt(feats / 2) = 1.0
    }

  def binary3Features(feats: Int, size: Int, prob: Double = 0.95) =
    binaryXFeatures(feats, size, prob) { pt =>
      pt(feats / 4) = 1.0
      pt(feats / 2) = 1.0
      pt(feats - 1) = 1.0
    }

  def binaryXFeatures(feats: Int, size: Int, prob: Double = 0.95)(f: Array[Double] => Unit): Data = {
    val features = Array.tabulate[Feature](feats + 1) { i =>
      if (i == 0) Output(i, "cat", "Y", "N") else new BinFeature(i)
    }
    // allocate once. tabulate copies...
    val rawData = Array.ofDim[Double](size, feats + 1)
    var i = 0
    while (i < rawData.length) {
      val pt = rawData(i)
      var j = 0
      while (j < pt.length) {
        pt(j) = round(uniform.draw)
        j += 1
      }
      val cat = round(uniform.draw)
      pt(0) = cat // Pos+ example is 0.0, Neg- is 1.0
      if (cat < 1.0 && uniform.draw <= prob) f(pt)
      i += 1
    }
    val data = Data(features, rawData)
    data.randomize()
    data
  }
}
MemoryNetworks/memnets
api/src/main/scala/memnets/ml/DataGens.scala
Scala
apache-2.0
2,256
package io.sqooba.oss.timeseries.immutable

import io.sqooba.oss.timeseries.TimeSeriesTestBench
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec

class GorillaBlockTimeSeriesSpec extends AnyFlatSpec with Matchers with TimeSeriesTestBench {

  private val entries = Seq(TSEntry(1, 1.2, 2), TSEntry(10, -2d, 2), TSEntry(11, 1d, 1))

  "GorillaBlockTimeSeries" should "choose its builder type by the type of the result" in {
    val series = GorillaBlockTimeSeries.ofOrderedEntriesSafe(entries)

    val gorillaBuilder = series.newBuilder[Double]()
    gorillaBuilder ++= entries
    gorillaBuilder.result() shouldBe a[GorillaBlockTimeSeries]

    val otherBuilder = series.newBuilder[Any]()
    otherBuilder ++= Seq(TSEntry(1, "hi", 20), TSEntry(2, 1324, 20))
    otherBuilder.result() shouldBe a[VectorTimeSeries[_]]
  }

  it should "choose its result implementation by the result type of a map" in {
    val series = GorillaBlockTimeSeries.ofOrderedEntriesSafe(
      Seq(TSEntry(1, 1d, 1), TSEntry(2, 4d, 1))
    )

    series.map[Double](_ * 2) shouldBe a[GorillaBlockTimeSeries]
    series.map(_ => "hello", compress = false) should not be a[GorillaBlockTimeSeries]
    series.map(_ => "hello", compress = false) shouldBe a[VectorTimeSeries[_]]
    series.map(_ => "hello") shouldBe a[TSEntry[_]]
  }

  it should behave like nonEmptyNonSingletonDoubleTimeSeries(
    GorillaBlockTimeSeries.ofOrderedEntriesSafe(_)
  )

  it should behave like nonEmptyNonSingletonDoubleTimeSeriesWithCompression(
    GorillaBlockTimeSeries.ofOrderedEntriesSafe(_)
  )

  it should "have entries that are traversable multiple times" in {
    val inputEntries = Seq(TSEntry(1, 1d, 1), TSEntry(2, 4d, 1))
    val series = GorillaBlockTimeSeries.ofOrderedEntriesSafe(inputEntries)

    series.entries.isTraversableAgain shouldBe true

    val es = series.entries
    noException should be thrownBy {
      es.toVector shouldBe es.toVector
      es.toSeq shouldBe inputEntries
    }
  }
}
Shastick/scala-timeseries-lib
src/test/scala/io/sqooba/oss/timeseries/immutable/GorillaBlockTimeSeriesSpec.scala
Scala
apache-2.0
2,030
package org.jetbrains.plugins.scala
package debugger

import com.intellij.debugger.NameMapper
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.util.Computable
import com.intellij.psi.PsiClass
import org.jetbrains.annotations.NotNull
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition, ScTrait}

/**
 * @author ilyas
 */
class ScalaJVMNameMapper extends NameMapper {
  def getQualifiedName(@NotNull clazz: PsiClass): String = {
    ApplicationManager.getApplication.runReadAction(new Computable[String] {
      def compute: String = {
        clazz match {
          case obj: ScObject => obj.qualifiedName + "$"
          case tr: ScTrait => tr.qualifiedName
          case templDef: ScTemplateDefinition => templDef.qualifiedName
          case psiClass => psiClass.getQualifiedName
        }
      }
    })
  }
}
triggerNZ/intellij-scala
src/org/jetbrains/plugins/scala/debugger/ScalaJVMNameMapper.scala
Scala
apache-2.0
896
package controllers

import java.util.UUID

import play.api.data.Form
import play.api.libs.json.Json
import play.api.mvc.{Action, AnyContent, Controller}
import security.UserAuthAction
import services.{ReadService, TagEventProducer}

import scala.util.{Failure, Success}

/**
 *
 * TagController class
 * <p/>
 * Description...
 *
 * @author artem klevakin
 */
class TagController(tagEventProducer: TagEventProducer,
                    userAuthAction: UserAuthAction,
                    readService: ReadService) extends Controller {

  case class CreateTagData(text: String)
  case class DeleteTagData(id: UUID)

  import play.api.data.Forms._

  val createTagForm = Form {
    mapping(
      "text" -> nonEmptyText
    )(CreateTagData.apply)(CreateTagData.unapply)
  }

  val deleteTagForm = Form {
    mapping(
      "id" -> uuid
    )(DeleteTagData.apply)(DeleteTagData.unapply)
  }

  def getTags: Action[AnyContent] = Action {
    val tagsT = readService.getAllTags
    tagsT match {
      case Failure(th) => InternalServerError
      case Success(tags) => Ok(Json.toJson(tags))
    }
  }

  def createTag(): Action[AnyContent] = userAuthAction { implicit request =>
    createTagForm.bindFromRequest.fold(
      formWithErrors => BadRequest,
      data => {
        tagEventProducer.createTag(data.text, request.user.userId)
        Ok
      }
    )
  }

  def deleteTag(): Action[AnyContent] = userAuthAction { implicit request =>
    deleteTagForm.bindFromRequest.fold(
      formWithErrors => BadRequest,
      data => {
        tagEventProducer.deleteTag(data.id, request.user.userId)
        Ok
      }
    )
  }
}
getArtemUsername/play-and-events
app/controllers/TagController.scala
Scala
mit
1,676
object Test {
  def main(args: Array[String]): Unit = {
    val u = null.asInstanceOf[Unit]
    val b = null.asInstanceOf[Byte]
    val c = null.asInstanceOf[Char]
    val s = null.asInstanceOf[Short]
    val i = null.asInstanceOf[Int]
    val l = null.asInstanceOf[Long]
    val f = null.asInstanceOf[Float]
    val d = null.asInstanceOf[Double]
    val str = null.asInstanceOf[String]

    println(u)
    println(b)
    println(c)
    println(s)
    println(i)
    println(l)
    println(f)
    println(d)
    println(str)
  }
}
folone/dotty
tests/run/nullAsInstanceOf.scala
Scala
bsd-3-clause
531
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.sumologic.sumobot.plugins import akka.actor.{Actor, ActorLogging, ActorRef} import com.sumologic.sumobot.brain.BlockingBrain import com.sumologic.sumobot.core.Bootstrap import com.sumologic.sumobot.core.model._ import com.sumologic.sumobot.plugins.BotPlugin.{InitializePlugin, PluginAdded, PluginRemoved} import com.sumologic.sumobot.quartz.QuartzExtension import com.typesafe.config.Config import org.apache.http.HttpResponse import org.apache.http.client.methods.{HttpGet, HttpUriRequest} import slack.models.{Group, Im, User, Channel => ClientChannel} import slack.rtm.RtmState import java.net.URLEncoder import java.util.concurrent.{Executors, TimeoutException} import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal import scala.util.matching.Regex import scala.util.{Failure, Success} object BotPlugin { case object RequestHelp case class PluginAdded(plugin: ActorRef, help: String) case class PluginRemoved(plugin: ActorRef) case class InitializePlugin(state: RtmState, brain: ActorRef, pluginRegistry: ActorRef) def matchText(regex: String): Regex = ("(?i)(?s)" + regex).r } abstract class BotPlugin extends Actor with ActorLogging with Emotions { type ReceiveIncomingMessage = PartialFunction[IncomingMessage, Unit] type ReceiveReaction = PartialFunction[Reaction, Unit] protected var state: RtmState = _ protected var brain: ActorRef = _ protected var pluginRegistry: ActorRef = _ // For plugins to implement. protected def receiveIncomingMessage: ReceiveIncomingMessage protected def receiveReaction: ReceiveReaction = Map.empty protected def help: String // Helpers for plugins to use. 
protected def sendMessage(msg: OutgoingMessage): Unit = context.system.eventStream.publish(msg) protected def sendMessage(msg: OutgoingMessageWithAttachments): Unit = context.system.eventStream.publish(msg) protected def sendImage(im: OutgoingImage): Unit = context.system.eventStream.publish(im) protected def responseConcurrency = 10 protected def responseTimeout = 10.seconds implicit protected val responseExecutionContext = ExecutionContext.fromExecutor( Executors.newFixedThreadPool(responseConcurrency)) class RichIncomingMessage(msg: IncomingMessage) { def response(text: String, inThread: Boolean = false) = { val threadTs = if (inThread) { msg.threadTimestamp.orElse(Some(msg.idTimestamp)) } else { None } OutgoingMessage(msg.channel, responsePrefix(inThread) + text, threadTs) } def message(text: String) = OutgoingMessage(msg.channel, text) def say(text: String) = sendMessage(message(text)) def respond(text: String, inThread: Boolean = false) = sendMessage(response(text, inThread)) private def responsePrefix(inThread: Boolean): String = if (msg.channel.isInstanceOf[InstantMessageChannel] || inThread) { "" } else { s"${msg.sentBy.slackReference}: " } def scheduleResponse(delay: FiniteDuration, text: String): Unit = scheduleOutgoingMessage(delay, response(text)) def scheduleMessage(delay: FiniteDuration, text: String): Unit = scheduleOutgoingMessage(delay, message(text)) def scheduleOutgoingMessage(delay: FiniteDuration, outgoingMessage: OutgoingMessage): Unit = { context.system.scheduler.scheduleOnce(delay, new Runnable() { override def run(): Unit = sendMessage(outgoingMessage) }) } def respondInFuture(body: IncomingMessage => OutgoingMessage): Unit = { respondAsync((x: IncomingMessage) => Future[OutgoingMessage]{body(x)}) } def respondAsync(body: IncomingMessage => Future[OutgoingMessage]): Unit = { val timeout = akka.pattern.after(responseTimeout, using = context.system.scheduler)( Future.failed[OutgoingMessage](new TimeoutException("Response timed out"))) val response: Future[OutgoingMessage] = Future.firstCompletedOf[OutgoingMessage](Seq(timeout, body(msg))) response.onComplete{ case Failure(NonFatal(e)) => log.error(e, "Execution failed.") msg.response("Execution failed.") case Success(message) => sendMessage(message) } } def httpGet(url: String)(func: (IncomingMessage, HttpResponse) => OutgoingMessage): Unit = http(new HttpGet(url))(func) def http(request: HttpUriRequest)(func: (IncomingMessage, HttpResponse) => OutgoingMessage): Unit = { respondInFuture { (incoming: IncomingMessage) => val client = HttpClientWithTimeOut.client() func(incoming, client.execute(request)) } } } implicit def enrichIncomingMessage(msg: IncomingMessage): RichIncomingMessage = new RichIncomingMessage(msg) implicit def clientToPublicChannel(channel: ClientChannel): PublicChannel = PublicChannel(channel.id, channel.name) implicit def clientToGroupChannel(group: Group): GroupChannel = GroupChannel(group.id, group.name) implicit def clientToInstanceMessageChannel(im: Im): InstantMessageChannel = InstantMessageChannel(im.id, state.users.find(_.id == im.user).get) protected val UserId = "<@(\\\\w+)>" protected val ChannelId = "<#(C\\\\w+)\\\\|.*>" protected def mention(user: User): String = s"<@${user.id}>" protected def matchText(regex: String): Regex = BotPlugin.matchText(regex) protected def blockingBrain: BlockingBrain = new BlockingBrain(brain) protected def userById(id: String): Option[User] = state.users.find(_.id == id) protected[plugins] def userByName(name: String): Option[User] = 
state.users.find(_.name == name) protected def publicChannel(name: String): Option[PublicChannel] = state.channels.find(_.name == name).map(clientToPublicChannel) protected def groupChannel(name: String): Option[GroupChannel] = state.groups.find(_.name == name).map(clientToGroupChannel) protected[plugins] def instantMessageChannel(name: String): Option[InstantMessageChannel] = { for (user <- state.users.find(_.name == name); im <- state.ims.find(_.user == user.id)) yield clientToInstanceMessageChannel(im) } protected def channelForName(name: String): Option[Channel] = { Seq(publicChannel(name), groupChannel(name), instantMessageChannel(name)).flatten.headOption } protected def urlEncode(string: String): String = URLEncoder.encode(string, "utf-8") protected def scheduleActorMessage(name: String, cronExpression: String, message: AnyRef): Unit = { QuartzExtension(context.system).scheduleMessage(name, cronExpression, self, message) } protected def config: Config = context.system.settings.config.getConfig(s"plugins.${self.path.name}") // Implementation. Most plugins should not override. override final def preStart(): Unit = { context.system.eventStream.subscribe(self, classOf[IncomingMessage]) context.system.eventStream.subscribe(self, classOf[Reaction]) Bootstrap.receptionist.foreach(_ ! PluginAdded(self, help)) pluginPreStart() } protected def pluginPreStart(): Unit = {} override final def postStop(): Unit = { Bootstrap.receptionist.foreach(_ ! PluginRemoved(self)) context.system.eventStream.unsubscribe(self) pluginPostStop() } protected def pluginPostStop(): Unit = {} private final def receiveIncomingMessageInternal: ReceiveIncomingMessage = receiveIncomingMessage orElse { case ignore => } private final def receiveReactionInternal: ReceiveReaction = receiveReaction orElse { case ignore => } override def receive: Receive = uninitialized orElse pluginReceive private def uninitialized: Receive = { case InitializePlugin(newState, newBrain, newPluginRegistry) => this.state = newState this.brain = newBrain this.pluginRegistry = newPluginRegistry this.initialize() context.become(initialized orElse pluginReceive) } protected final def initialized: Receive = { case message@IncomingMessage(text, _, _, _, _, _, _) => receiveIncomingMessageInternal(message) case reaction@Reaction(_, _, _, _) => receiveReactionInternal(reaction) } protected def pluginReceive: Receive = Map.empty protected def initialize(): Unit = {} }
SumoLogic/sumobot
src/main/scala/com/sumologic/sumobot/plugins/BotPlugin.scala
Scala
apache-2.0
9,067
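A minimal sketch of a plugin built on the BotPlugin base class above (the plugin name, trigger regex, and reply are hypothetical; only matchText, help, receiveIncomingMessage, and the respond enrichment defined in BotPlugin are relied on):

import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.plugins.BotPlugin

class HelloPlugin extends BotPlugin {
  // Case-insensitive, dot-all matcher built with BotPlugin.matchText.
  private val Hello = matchText("hello.*")

  override protected def help: String = "hello - replies with a greeting"

  // The first constructor field of IncomingMessage is the message text,
  // mirroring the pattern match in BotPlugin.initialized; the remaining fields are ignored here.
  override protected def receiveIncomingMessage: ReceiveIncomingMessage = {
    case message @ IncomingMessage(Hello(), _, _, _, _, _, _) =>
      message.respond("Hello!") // RichIncomingMessage.respond via the implicit enrichment
  }
}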
package test { object NotNoPrefix { final class Id[A](val a: A) extends AnyVal final class Ids[A](val as: Seq[A]) extends AnyVal final class Bid[A, B](val ab: Map[A, B]) extends AnyVal } } object Test extends App { import test.NotNoPrefix._ def check[A](cls: Class[A])(implicit tag: reflect.ClassTag[A]): Unit = { val suffix = if (cls != tag.runtimeClass) " != " + tag.runtimeClass else "" println(cls.toString + suffix) } check(classOf[Id[Int]]) check(classOf[Id[_]]) check(classOf[Ids[Int]]) check(classOf[Ids[_]]) check(classOf[Bid[Int, Int]]) check(classOf[Bid[Int, _]]) check(classOf[Bid[_, Int]]) check(classOf[Bid[_, _]]) type Iddy[A] = Id[A] type Idsy[A] = Ids[A] type Biddy[A, B] = Bid[A, B] type Biddouble[A] = Bid[A, Double] type Bixt[L] = Biddouble[_] type Bixty = Bixt[_] check(classOf[Iddy[Int]]) check(classOf[Iddy[_]]) check(classOf[Idsy[Int]]) check(classOf[Idsy[_]]) check(classOf[Biddy[Int, Int]]) check(classOf[Biddy[Int, _]]) check(classOf[Biddy[_, Int]]) check(classOf[Biddy[_, _]]) check(classOf[Biddouble[Int]]) check(classOf[Biddouble[_]]) check(classOf[Bixt[Int]]) check(classOf[Bixt[_]]) check(classOf[Bixty]) }
scala/scala
test/files/run/t10551.scala
Scala
apache-2.0
1,237
package com.twitter.inject.server.tests import com.twitter.finagle.http.Status import com.twitter.inject.server.{EmbeddedTwitterServer, FeatureTest, TwitterServer} /** Test an injectable TwitterServer with the [[FeatureTest]] trait */ class FeatureTestTest extends FeatureTest { override val server: EmbeddedTwitterServer = new EmbeddedTwitterServer( twitterServer = new TwitterServer {}, disableTestLogging = true ).bind[String].toInstance("helloworld") /** * Explicitly start the server before all tests, close will be attempted by * [[com.twitter.inject.server.FeatureTestMixin]] in `afterAll`. */ override def beforeAll(): Unit = { server.start() } test("TwitterServer#starts up") { server.assertHealthy() } test("TwitterServer#stats receivers") { server.statsReceiver should equal(server.inMemoryStatsReceiver) } test("TwitterServer#feature test") { server.httpGetAdmin( "/admin/lint.json", andExpect = Status.Ok ) server.httpGetAdmin( "/admin/registry.json", andExpect = Status.Ok ) } test("TwitterServer#bind test") { server.injector.instance[String] should be("helloworld") } }
twitter/finatra
inject/inject-server/src/test/scala/com/twitter/inject/server/tests/FeatureTestTest.scala
Scala
apache-2.0
1,204
package codacy.plugins.test import java.io.{File => JFile} import java.nio.file.Path import better.files._ import com.codacy.analysis.core.model.{FileError, Issue, Pattern, ToolResult, ToolSpec} import com.codacy.plugins.api._ import com.codacy.plugins.api.languages.{Language, Languages} import com.codacy.plugins.results.traits.DockerTool import com.codacy.plugins.utils.DockerHelper import wvlet.log.LogSupport final case class DockerImage(name: String, version: String) { override def toString: String = { s"$name:$version" } } trait ITest extends LogSupport { val opt: String def run(docsDirectory: JFile, dockerImage: DockerImage, optArgs: Seq[String]): Boolean protected def findLanguages(testsDirectory: JFile): Set[Language] = { val languagesFromProperties = sys.props.get("codacy.tests.languages").map(_.split(",").flatMap(Languages.fromName).to[Set]) lazy val languagesFromFiles: Set[Language] = (for { testFile <- new TestFilesParser(testsDirectory).getTestFiles language <- Languages.fromName(testFile.language.toString) } yield language)(collection.breakOut) languagesFromProperties.getOrElse(languagesFromFiles) } protected def createDockerTool(languages: Set[Language], dockerImage: DockerImage): DockerTool = { val dockerImageName = dockerImage.name val dockerImageVersion = dockerImage.version new DockerTool(dockerName = dockerImageName, isDefault = true, languages = languages, name = dockerImageName, shortName = dockerImageName, uuid = dockerImageName, documentationUrl = "", sourceCodeUrl = "", prefix = "", needsCompilation = false, hasUIConfiguration = true) { override val dockerImageName = s"${dockerImage.name}:${dockerImageVersion}" override def toolVersion(dockerHelper: DockerHelper): Option[String] = Some(dockerImageVersion) } } protected def createToolSpec(languages: Set[Language], dockerImage: DockerImage): ToolSpec = { val dockerImageName = dockerImage.name val dockerImageVersion = dockerImage.version new ToolSpec(uuid = dockerImageName, dockerImage = s"${dockerImage.name}:${dockerImageVersion}", version = dockerImageVersion, languages = languages, name = dockerImageName, shortName = dockerImageName, documentationUrl = Some(""), sourceCodeUrl = Some(""), prefix = "", needsCompilation = false, hasConfigFile = true, isClientSide = false, hasUIConfiguration = true, isDefault = true, configFilenames = Set.empty) } protected def filterResults(spec: Option[results.Tool.Specification], sourcePath: Path, files: Seq[JFile], patterns: Seq[Pattern], toolResults: Set[ToolResult]): Set[Issue] = { val filtered = filterFileErrors(toolResults) val filteredFromSpec = filterResultsFromSpecPatterns(filtered, spec) val filteredFromFiles = filterResultsFromFiles(filteredFromSpec, files, sourcePath) val filteredFromPatterns = filterResultsFromPatterns(filteredFromSpec, patterns) filteredFromFiles.intersect(filteredFromPatterns) } private def filterResultsFromSpecPatterns(issuesResults: Set[Issue], specOpt: Option[results.Tool.Specification]) = { specOpt.fold(issuesResults) { spec => val specPatternIds: Set[results.Pattern.Id] = spec.patterns.map(_.patternId) issuesResults.filter(issue => specPatternIds.contains(issue.patternId)) } } private def filterResultsFromPatterns(issuesResults: Set[Issue], patterns: Seq[Pattern]) = { val (filteredPatternResults, otherPatternsResults) = issuesResults.partition { result => patterns.map(_.id).contains(result.patternId.value) } if (otherPatternsResults.nonEmpty) { error(s"Some results returned were not requested by the test and were discarded!") info(s""" |Extra 
results returned: |* ${otherPatternsResults.map(_.patternId.value).mkString(", ")} | |Check the results returned: | * The tool should only return results requested in the configuration | * The results patternIds should match the names listed in the tools /docs/patterns.json """.stripMargin) } filteredPatternResults } private def filterResultsFromFiles(issuesResults: Set[Issue], files: Seq[JFile], sourcePath: Path) = { val relativeFiles = files.map(file => sourcePath.relativize(file.getAbsoluteFile.toPath).toString) val (filteredFileResults, otherFilesResults) = issuesResults.partition { result => relativeFiles.contains(result.filename.toString) } if (otherFilesResults.nonEmpty) { error(s"Some results are not in the files requested and were discarded!") info(s""" |Extra files: | * ${otherFilesResults.map(_.filename).mkString(", ")} | |Check the paths returned: | * The tool should only return results for the files requested | * The files should be relative to /src (ex: /src/dir/file.js -> dir/file.js) """.stripMargin) } filteredFileResults } private def filterFileErrors(results: Set[ToolResult]) = { val (issuesResults: Set[Issue], fileErrorsResults: Set[FileError]) = results.foldLeft((Set.empty[Issue], Set.empty[FileError])) { case ((issues, fileErrors), res) => res match { case issue: Issue => (issues + issue, fileErrors) case fileError: FileError => (issues, fileErrors + fileError) } } if (fileErrorsResults.nonEmpty) { error(s"Some files were not analysed because the tool failed analysing them!") info(fileErrorsResults.map(fe => s"* File: ${fe.filename}, Error: ${fe.message}").mkString("\\n")) } issuesResults } protected def multipleDirectories(testsDirectory: File, optArgs: Seq[String]) = { val selectedTest = optArgs.sliding(2).collectFirst { case Seq("--only", multipleTestDir) => multipleTestDir } selectedTest match { case Some(dirName) => Seq(testsDirectory / dirName) case None => testsDirectory.list.toSeq } } }
codacy/codacy-plugins-test
src/main/scala/codacy/plugins/test/ITest.scala
Scala
mit
6,599
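For illustration only, a sketch of the smallest possible ITest implementation satisfying the trait's contract above (the object name, opt value, and log message are hypothetical; info comes from the LogSupport mix-in):

import java.io.{File => JFile}
import codacy.plugins.test.{DockerImage, ITest}

object NoOpTest extends ITest {
  // The CLI option this test answers to.
  val opt = "noop"

  // Pretend to run and report success without touching Docker.
  def run(docsDirectory: JFile, dockerImage: DockerImage, optArgs: Seq[String]): Boolean = {
    info(s"Would test $dockerImage against docs at ${docsDirectory.getAbsolutePath}")
    true
  }
}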
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import java.util.Locale import java.util.regex.{MatchResult, Pattern} import org.apache.commons.lang3.StringEscapeUtils import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.catalyst.util.{GenericArrayData, StringUtils} import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.UTF8String abstract class StringRegexExpression extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant { def escape(v: String): String def matches(regex: Pattern, str: String): Boolean override def dataType: DataType = BooleanType override def inputTypes: Seq[DataType] = Seq(StringType, StringType) // try cache the pattern for Literal private lazy val cache: Pattern = right match { case x @ Literal(value: String, StringType) => compile(value) case _ => null } protected def compile(str: String): Pattern = if (str == null) { null } else { // Let it raise exception if couldn't compile the regex string Pattern.compile(escape(str)) } protected def pattern(str: String) = if (cache == null) compile(str) else cache protected override def nullSafeEval(input1: Any, input2: Any): Any = { val regex = pattern(input2.asInstanceOf[UTF8String].toString) if(regex == null) { null } else { matches(regex, input1.asInstanceOf[UTF8String].toString) } } override def sql: String = s"${left.sql} ${prettyName.toUpperCase(Locale.ROOT)} ${right.sql}" } /** * Simple RegEx pattern matching function */ @ExpressionDescription( usage = "str _FUNC_ pattern - Returns true if str matches pattern, " + "null if any arguments are null, false otherwise.", extended = """ Arguments: str - a string expression pattern - a string expression. The pattern is a string which is matched literally, with exception to the following special symbols: _ matches any one character in the input (similar to . in posix regular expressions) % matches zero or more characters in the input (similar to .* in posix regular expressions) The escape character is '\\'. If an escape character precedes a special symbol or another escape character, the following character is matched literally. It is invalid to escape any other character. Examples: > SELECT '%SystemDrive%\\Users\\John' _FUNC_ '\\%SystemDrive\\%\\\\Users%' true See also: Use RLIKE to match with standard regular expressions. 
""") case class Like(left: Expression, right: Expression) extends StringRegexExpression { override def escape(v: String): String = StringUtils.escapeLikeRegex(v) override def matches(regex: Pattern, str: String): Boolean = regex.matcher(str).matches() override def toString: String = s"$left LIKE $right" override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val patternClass = classOf[Pattern].getName val escapeFunc = StringUtils.getClass.getName.stripSuffix("$") + ".escapeLikeRegex" val pattern = ctx.freshName("pattern") if (right.foldable) { val rVal = right.eval() if (rVal != null) { val regexStr = StringEscapeUtils.escapeJava(escape(rVal.asInstanceOf[UTF8String].toString())) ctx.addMutableState(patternClass, pattern, s"""$pattern = ${patternClass}.compile("$regexStr");""") // We don't use nullSafeCodeGen here because we don't want to re-evaluate right again. val eval = left.genCode(ctx) ev.copy(code = s""" ${eval.code} boolean ${ev.isNull} = ${eval.isNull}; ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; if (!${ev.isNull}) { ${ev.value} = $pattern.matcher(${eval.value}.toString()).matches(); } """) } else { ev.copy(code = s""" boolean ${ev.isNull} = true; ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; """) } } else { val rightStr = ctx.freshName("rightStr") nullSafeCodeGen(ctx, ev, (eval1, eval2) => { s""" String $rightStr = ${eval2}.toString(); ${patternClass} $pattern = ${patternClass}.compile($escapeFunc($rightStr)); ${ev.value} = $pattern.matcher(${eval1}.toString()).matches(); """ }) } } } @ExpressionDescription( usage = "str _FUNC_ regexp - Returns true if `str` matches `regexp`, or false otherwise.") case class RLike(left: Expression, right: Expression) extends StringRegexExpression { override def escape(v: String): String = v override def matches(regex: Pattern, str: String): Boolean = regex.matcher(str).find(0) override def toString: String = s"$left RLIKE $right" override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val patternClass = classOf[Pattern].getName val pattern = ctx.freshName("pattern") if (right.foldable) { val rVal = right.eval() if (rVal != null) { val regexStr = StringEscapeUtils.escapeJava(rVal.asInstanceOf[UTF8String].toString()) ctx.addMutableState(patternClass, pattern, s"""$pattern = ${patternClass}.compile("$regexStr");""") // We don't use nullSafeCodeGen here because we don't want to re-evaluate right again. val eval = left.genCode(ctx) ev.copy(code = s""" ${eval.code} boolean ${ev.isNull} = ${eval.isNull}; ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; if (!${ev.isNull}) { ${ev.value} = $pattern.matcher(${eval.value}.toString()).find(0); } """) } else { ev.copy(code = s""" boolean ${ev.isNull} = true; ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; """) } } else { val rightStr = ctx.freshName("rightStr") nullSafeCodeGen(ctx, ev, (eval1, eval2) => { s""" String $rightStr = ${eval2}.toString(); ${patternClass} $pattern = ${patternClass}.compile($rightStr); ${ev.value} = $pattern.matcher(${eval1}.toString()).find(0); """ }) } } } /** * Splits str around pat (pattern is a regular expression). 
*/ @ExpressionDescription( usage = "_FUNC_(str, regex) - Splits `str` around occurrences that match `regex`.", extended = """ Examples: > SELECT _FUNC_('oneAtwoBthreeC', '[ABC]'); ["one","two","three",""] """) case class StringSplit(str: Expression, pattern: Expression) extends BinaryExpression with ImplicitCastInputTypes { override def left: Expression = str override def right: Expression = pattern override def dataType: DataType = ArrayType(StringType) override def inputTypes: Seq[DataType] = Seq(StringType, StringType) override def nullSafeEval(string: Any, regex: Any): Any = { val strings = string.asInstanceOf[UTF8String].split(regex.asInstanceOf[UTF8String], -1) new GenericArrayData(strings.asInstanceOf[Array[Any]]) } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val arrayClass = classOf[GenericArrayData].getName nullSafeCodeGen(ctx, ev, (str, pattern) => // Array in java is covariant, so we don't need to cast UTF8String[] to Object[]. s"""${ev.value} = new $arrayClass($str.split($pattern, -1));""") } override def prettyName: String = "split" } /** * Replace all substrings of str that match regexp with rep. * * NOTE: this expression is not THREAD-SAFE, as it has some internal mutable status. */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(str, regexp, rep) - Replaces all substrings of `str` that match `regexp` with `rep`.", extended = """ Examples: > SELECT _FUNC_('100-200', '(\\d+)', 'num'); num-num """) // scalastyle:on line.size.limit case class RegExpReplace(subject: Expression, regexp: Expression, rep: Expression) extends TernaryExpression with ImplicitCastInputTypes { // last regex in string, we will update the pattern iff regexp value changed. @transient private var lastRegex: UTF8String = _ // last regex pattern, we cache it for performance concern @transient private var pattern: Pattern = _ // last replacement string, we don't want to convert a UTF8String => java.langString every time. 
@transient private var lastReplacement: String = _ @transient private var lastReplacementInUTF8: UTF8String = _ // result buffer write by Matcher @transient private lazy val result: StringBuffer = new StringBuffer override def nullSafeEval(s: Any, p: Any, r: Any): Any = { if (!p.equals(lastRegex)) { // regex value changed lastRegex = p.asInstanceOf[UTF8String].clone() pattern = Pattern.compile(lastRegex.toString) } if (!r.equals(lastReplacementInUTF8)) { // replacement string changed lastReplacementInUTF8 = r.asInstanceOf[UTF8String].clone() lastReplacement = lastReplacementInUTF8.toString } val m = pattern.matcher(s.toString()) result.delete(0, result.length()) while (m.find) { m.appendReplacement(result, lastReplacement) } m.appendTail(result) UTF8String.fromString(result.toString) } override def dataType: DataType = StringType override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType, StringType) override def children: Seq[Expression] = subject :: regexp :: rep :: Nil override def prettyName: String = "regexp_replace" override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val termLastRegex = ctx.freshName("lastRegex") val termPattern = ctx.freshName("pattern") val termLastReplacement = ctx.freshName("lastReplacement") val termLastReplacementInUTF8 = ctx.freshName("lastReplacementInUTF8") val termResult = ctx.freshName("result") val classNamePattern = classOf[Pattern].getCanonicalName val classNameStringBuffer = classOf[java.lang.StringBuffer].getCanonicalName val matcher = ctx.freshName("matcher") ctx.addMutableState("UTF8String", termLastRegex, s"${termLastRegex} = null;") ctx.addMutableState(classNamePattern, termPattern, s"${termPattern} = null;") ctx.addMutableState("String", termLastReplacement, s"${termLastReplacement} = null;") ctx.addMutableState("UTF8String", termLastReplacementInUTF8, s"${termLastReplacementInUTF8} = null;") ctx.addMutableState(classNameStringBuffer, termResult, s"${termResult} = new $classNameStringBuffer();") val setEvNotNull = if (nullable) { s"${ev.isNull} = false;" } else { "" } nullSafeCodeGen(ctx, ev, (subject, regexp, rep) => { s""" if (!$regexp.equals(${termLastRegex})) { // regex value changed ${termLastRegex} = $regexp.clone(); ${termPattern} = ${classNamePattern}.compile(${termLastRegex}.toString()); } if (!$rep.equals(${termLastReplacementInUTF8})) { // replacement string changed ${termLastReplacementInUTF8} = $rep.clone(); ${termLastReplacement} = ${termLastReplacementInUTF8}.toString(); } ${termResult}.delete(0, ${termResult}.length()); java.util.regex.Matcher ${matcher} = ${termPattern}.matcher($subject.toString()); while (${matcher}.find()) { ${matcher}.appendReplacement(${termResult}, ${termLastReplacement}); } ${matcher}.appendTail(${termResult}); ${ev.value} = UTF8String.fromString(${termResult}.toString()); $setEvNotNull """ }) } } /** * Extract a specific(idx) group identified by a Java regex. * * NOTE: this expression is not THREAD-SAFE, as it has some internal mutable status. */ @ExpressionDescription( usage = "_FUNC_(str, regexp[, idx]) - Extracts a group that matches `regexp`.", extended = """ Examples: > SELECT _FUNC_('100-200', '(\\d+)-(\\d+)', 1); 100 """) case class RegExpExtract(subject: Expression, regexp: Expression, idx: Expression) extends TernaryExpression with ImplicitCastInputTypes { def this(s: Expression, r: Expression) = this(s, r, Literal(1)) // last regex in string, we will update the pattern iff regexp value changed. 
@transient private var lastRegex: UTF8String = _ // last regex pattern, we cache it for performance concern @transient private var pattern: Pattern = _ override def nullSafeEval(s: Any, p: Any, r: Any): Any = { if (!p.equals(lastRegex)) { // regex value changed lastRegex = p.asInstanceOf[UTF8String].clone() pattern = Pattern.compile(lastRegex.toString) } val m = pattern.matcher(s.toString) if (m.find) { val mr: MatchResult = m.toMatchResult val group = mr.group(r.asInstanceOf[Int]) if (group == null) { // Pattern matched, but not optional group UTF8String.EMPTY_UTF8 } else { UTF8String.fromString(group) } } else { UTF8String.EMPTY_UTF8 } } override def dataType: DataType = StringType override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType, IntegerType) override def children: Seq[Expression] = subject :: regexp :: idx :: Nil override def prettyName: String = "regexp_extract" override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val termLastRegex = ctx.freshName("lastRegex") val termPattern = ctx.freshName("pattern") val classNamePattern = classOf[Pattern].getCanonicalName val matcher = ctx.freshName("matcher") val matchResult = ctx.freshName("matchResult") ctx.addMutableState("UTF8String", termLastRegex, s"${termLastRegex} = null;") ctx.addMutableState(classNamePattern, termPattern, s"${termPattern} = null;") val setEvNotNull = if (nullable) { s"${ev.isNull} = false;" } else { "" } nullSafeCodeGen(ctx, ev, (subject, regexp, idx) => { s""" if (!$regexp.equals(${termLastRegex})) { // regex value changed ${termLastRegex} = $regexp.clone(); ${termPattern} = ${classNamePattern}.compile(${termLastRegex}.toString()); } java.util.regex.Matcher ${matcher} = ${termPattern}.matcher($subject.toString()); if (${matcher}.find()) { java.util.regex.MatchResult ${matchResult} = ${matcher}.toMatchResult(); if (${matchResult}.group($idx) == null) { ${ev.value} = UTF8String.EMPTY_UTF8; } else { ${ev.value} = UTF8String.fromString(${matchResult}.group($idx)); } $setEvNotNull } else { ${ev.value} = UTF8String.EMPTY_UTF8; $setEvNotNull }""" }) } }
MLnick/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/regexpExpressions.scala
Scala
apache-2.0
15,588
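The expressions above can also be exercised directly, bypassing the SQL parser and analyzer; a rough sketch, assuming Expression.eval's no-argument form is fine here because every input is a foldable literal already of the expected type:

import org.apache.spark.sql.catalyst.expressions.{Literal, RegExpExtract, RegExpReplace, StringSplit}

// regexp_extract('100-200', '(\d+)-(\d+)', 1) => "100"
val extracted = RegExpExtract(Literal("100-200"), Literal("(\\d+)-(\\d+)"), Literal(1)).eval()

// regexp_replace('100-200', '(\d+)', 'num') => "num-num"
val replaced = RegExpReplace(Literal("100-200"), Literal("(\\d+)"), Literal("num")).eval()

// split('oneAtwoBthreeC', '[ABC]') => ["one", "two", "three", ""]
val parts = StringSplit(Literal("oneAtwoBthreeC"), Literal("[ABC]")).eval()

println(extracted) // UTF8String "100"
println(replaced)  // UTF8String "num-num"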
/* ==================================================================== Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================================================== */ package org.orbeon.oxf.fr.excel import java.util.Locale import java.{lang => jl} object NumberToTextConversionExamples { object ExampleConversion { private def doubleToHexString(d: Double) = "0x" + jl.Long.toHexString(jl.Double.doubleToLongBits(d)).toUpperCase(Locale.ROOT) + "L" def apply(rawDoubleBits: Long, javaRendering: String, excelRendering: String): ExampleConversion = { val doubleValue = jl.Double.longBitsToDouble(rawDoubleBits) if (javaRendering == "NaN") { if (! jl.Double.isNaN(doubleValue)) throw new IllegalArgumentException("value must be NaN") } else { if (jl.Double.isNaN(doubleValue)) throw new IllegalArgumentException("value must not be NaN") // just to be dead sure test conversion in Java both ways val javaToStringOk = javaRendering == jl.Double.toString(doubleValue) val javaParseOk = javaRendering.toDouble == doubleValue if (! javaToStringOk || ! javaParseOk) { val msgA = s"Specified `rawDoubleBits` `${ExampleConversion.doubleToHexString(doubleValue)}` encodes to double `$doubleValue`." val msgB = s"Specified `javaRendering` `$javaRendering` parses as double with `rawDoubleBits` `${ExampleConversion.doubleToHexString(javaRendering.toDouble)}`." throw new RuntimeException(msgA + ' ' + msgB) } } ExampleConversion( javaRendering, excelRendering, doubleValue, rawDoubleBits, ) } } case class ExampleConversion( javaRendering : String, excelRendering : String, doubleValue : Double, rawDoubleBits : Long ) { def isNaN: Boolean = jl.Double.isNaN(doubleValue) } // Number rendering examples as observed from Excel. 
val Examples = List( // basic rs ExampleConversion(0x0000000000000000L, "0.0", "0"), ExampleConversion(0x3FF0000000000000L, "1.0", "1"), ExampleConversion(0x3FF00068DB8BAC71L, "1.0001", "1.0001"), ExampleConversion(0x4087A00000000000L, "756.0", "756"), ExampleConversion(0x401E3D70A3D70A3DL, "7.56", "7.56"), ExampleConversion(0x405EDD3C07FB4C8CL, "123.4567890123455", "123.456789012345"), ExampleConversion(0x405EDD3C07FB4C99L, "123.45678901234568", "123.456789012346"), ExampleConversion(0x405EDD3C07FB4CAEL, "123.45678901234598", "123.456789012346"), ExampleConversion(0x4132D687E3DF2180L, "1234567.8901234567", "1234567.89012346"), ExampleConversion(0x3F543A272D9E0E49L, "0.001234567890123455", "0.00123456789012345"), ExampleConversion(0x3F543A272D9E0E4AL, "0.0012345678901234552", "0.00123456789012346"), ExampleConversion(0x3F543A272D9E0E55L, "0.0012345678901234576", "0.00123456789012346"), ExampleConversion(0x3F543A272D9E0E72L, "0.0012345678901234639", "0.00123456789012346"), ExampleConversion(0x3F543A272D9E0E76L, "0.0012345678901234647", "0.00123456789012346"), ExampleConversion(0x3F543A272D9E0E77L, "0.001234567890123465", "0.00123456789012346"), ExampleConversion(0x3F543A272D9E0E78L, "0.0012345678901234652", "0.00123456789012347"), ExampleConversion(0x3F543A272D9E0EA5L, "0.001234567890123475", "0.00123456789012347"), ExampleConversion(0x3F543A272D9E0EA6L, "0.0012345678901234751", "0.00123456789012348"), ExampleConversion(0x544CE6345CF3209CL, "1.2345678901234549E98", "1.23456789012345E+98"), ExampleConversion(0x544CE6345CF3209DL, "1.234567890123455E98", "1.23456789012346E+98"), ExampleConversion(0x544CE6345CF320DEL, "1.2345678901234649E98", "1.23456789012346E+98"), ExampleConversion(0x544CE6345CF320DFL, "1.234567890123465E98", "1.23456789012347E+98"), ExampleConversion(0x544CE6345CF32120L, "1.234567890123475E98", "1.23456789012347E+98"), ExampleConversion(0x544CE6345CF32121L, "1.2345678901234751E98", "1.23456789012348E+98"), ExampleConversion(0x54820FE0BA17F5E9L, "1.23456789012355E99", "1.2345678901236E+99"), ExampleConversion(0x54820FE0BA17F5EAL, "1.2345678901235502E99", "1.2345678901236E+99"), ExampleConversion(0x54820FE0BA17F784L, "1.2345678901236498E99", "1.2345678901237E+99"), ExampleConversion(0x54820FE0BA17F785L, "1.23456789012365E99", "1.2345678901237E+99"), ExampleConversion(0x54820FE0BA17F920L, "1.2345678901237498E99", "1.2345678901238E+99"), ExampleConversion(0x54820FE0BA17F921L, "1.23456789012375E99", "1.2345678901238E+99"), // transitions around the E98,E99,E100 boundaries ExampleConversion(0x547D42AEA2879F19L, "9.999999999999974E98", "9.99999999999997E+98"), ExampleConversion(0x547D42AEA2879F1AL, "9.999999999999975E98", "9.99999999999998E+98"), ExampleConversion(0x547D42AEA2879F21L, "9.999999999999984E98", "9.99999999999998E+98"), ExampleConversion(0x547D42AEA2879F22L, "9.999999999999985E98", "9.99999999999999E+98"), ExampleConversion(0x547D42AEA2879F2AL, "9.999999999999995E98", "9.99999999999999E+98"), ExampleConversion(0x547D42AEA2879F2BL, "9.999999999999996E98", "1E+99"), ExampleConversion(0x547D42AEA287A0A0L, "1.0000000000000449E99", "1E+99"), ExampleConversion(0x547D42AEA287A0A1L, "1.000000000000045E99", "1.0000000000001E+99"), ExampleConversion(0x547D42AEA287A3D8L, "1.0000000000001449E99", "1.0000000000001E+99"), ExampleConversion(0x547D42AEA287A3D9L, "1.000000000000145E99", "1.0000000000002E+99"), ExampleConversion(0x547D42AEA287A710L, "1.000000000000245E99", "1.0000000000002E+99"), ExampleConversion(0x547D42AEA287A711L, "1.0000000000002451E99", "1.0000000000003E+99"), 
ExampleConversion(0x54B249AD2594C2F9L, "9.999999999999744E99", "9.9999999999997E+99"), ExampleConversion(0x54B249AD2594C2FAL, "9.999999999999746E99", "9.9999999999998E+99"), ExampleConversion(0x54B249AD2594C32DL, "9.999999999999845E99", "9.9999999999998E+99"), ExampleConversion(0x54B249AD2594C32EL, "9.999999999999847E99", "9.9999999999999E+99"), ExampleConversion(0x54B249AD2594C360L, "9.999999999999944E99", "9.9999999999999E+99"), ExampleConversion(0x54B249AD2594C361L, "9.999999999999946E99", "1E+100"), ExampleConversion(0x54B249AD2594C464L, "1.0000000000000449E100", "1E+100"), ExampleConversion(0x54B249AD2594C465L, "1.000000000000045E100", "1.0000000000001E+100"), ExampleConversion(0x54B249AD2594C667L, "1.000000000000145E100", "1.0000000000001E+100"), ExampleConversion(0x54B249AD2594C668L, "1.0000000000001451E100", "1.0000000000002E+100"), ExampleConversion(0x54B249AD2594C86AL, "1.000000000000245E100", "1.0000000000002E+100"), ExampleConversion(0x54B249AD2594C86BL, "1.0000000000002452E100", "1.0000000000003E+100"), ExampleConversion(0x2B95DF5CA28EF4A8L, "1.0000000000000251E-98", "1.00000000000003E-98"), ExampleConversion(0x2B95DF5CA28EF4A7L, "1.000000000000025E-98", "1.00000000000002E-98"), ExampleConversion(0x2B95DF5CA28EF46AL, "1.000000000000015E-98", "1.00000000000002E-98"), ExampleConversion(0x2B95DF5CA28EF469L, "1.0000000000000149E-98", "1.00000000000001E-98"), ExampleConversion(0x2B95DF5CA28EF42DL, "1.0000000000000051E-98", "1.00000000000001E-98"), ExampleConversion(0x2B95DF5CA28EF42CL, "1.000000000000005E-98", "1E-98"), ExampleConversion(0x2B95DF5CA28EF3ECL, "9.999999999999946E-99", "1E-98"), ExampleConversion(0x2B95DF5CA28EF3EBL, "9.999999999999944E-99", "9.9999999999999E-99"), ExampleConversion(0x2B95DF5CA28EF3AEL, "9.999999999999845E-99", "9.9999999999999E-99"), ExampleConversion(0x2B95DF5CA28EF3ADL, "9.999999999999843E-99", "9.9999999999998E-99"), ExampleConversion(0x2B95DF5CA28EF371L, "9.999999999999746E-99", "9.9999999999998E-99"), ExampleConversion(0x2B95DF5CA28EF370L, "9.999999999999744E-99", "9.9999999999997E-99"), ExampleConversion(0x2B617F7D4ED8C7F5L, "1.000000000000245E-99", "1.0000000000003E-99"), ExampleConversion(0x2B617F7D4ED8C7F4L, "1.0000000000002449E-99", "1.0000000000002E-99"), ExampleConversion(0x2B617F7D4ED8C609L, "1.0000000000001452E-99", "1.0000000000002E-99"), ExampleConversion(0x2B617F7D4ED8C608L, "1.000000000000145E-99", "1.0000000000001E-99"), ExampleConversion(0x2B617F7D4ED8C41CL, "1.000000000000045E-99", "1.0000000000001E-99"), ExampleConversion(0x2B617F7D4ED8C41BL, "1.0000000000000449E-99", "1E-99"), ExampleConversion(0x2B617F7D4ED8C323L, "9.999999999999945E-100", "1E-99"), ExampleConversion(0x2B617F7D4ED8C322L, "9.999999999999943E-100", "9.9999999999999E-100"), ExampleConversion(0x2B617F7D4ED8C2F2L, "9.999999999999846E-100", "9.9999999999999E-100"), ExampleConversion(0x2B617F7D4ED8C2F1L, "9.999999999999844E-100", "9.9999999999998E-100"), ExampleConversion(0x2B617F7D4ED8C2C1L, "9.999999999999746E-100", "9.9999999999998E-100"), ExampleConversion(0x2B617F7D4ED8C2C0L, "9.999999999999744E-100", "9.9999999999997E-100"), // small numbers ExampleConversion(0x3EE9E409302678BAL, "1.2345678901234568E-5", "1.23456789012346E-05"), ExampleConversion(0x3F202E85BE180B74L, "1.2345678901234567E-4", "0.000123456789012346"), ExampleConversion(0x3F543A272D9E0E51L, "0.0012345678901234567", "0.00123456789012346"), ExampleConversion(0x3F8948B0F90591E6L, "0.012345678901234568", "0.0123456789012346"), ExampleConversion(0x3EE9E409301B5A02L, "1.23456789E-5", "0.0000123456789"), 
ExampleConversion(0x3E6E7D05BDABDE50L, "5.6789012345E-8", "0.000000056789012345"), ExampleConversion(0x3E6E7D05BDAD407EL, "5.67890123456E-8", "5.67890123456E-08"), ExampleConversion(0x3E6E7D06029F18BEL, "5.678902E-8", "0.00000005678902"), ExampleConversion(0x2BCB5733CB32AE6EL, "9.999999999999123E-98", "9.99999999999912E-98"), ExampleConversion(0x2B617F7D4ED8C59EL, "1.0000000000001235E-99", "1.0000000000001E-99"), ExampleConversion(0x0036319916D67853L, "1.2345678901234578E-307", "1.2345678901235E-307"), ExampleConversion(0x359DEE7A4AD4B81FL, "2.0E-50", "2E-50"), // large numbers ExampleConversion(0x41678C29DCD6E9E0L, "1.2345678901234567E7", "12345678.9012346"), ExampleConversion(0x42A674E79C5FE523L, "1.2345678901234568E13", "12345678901234.6"), ExampleConversion(0x42DC12218377DE6BL, "1.2345678901234567E14", "123456789012346"), ExampleConversion(0x43118B54F22AEB03L, "1.2345678901234568E15", "1234567890123460"), ExampleConversion(0x43E56A95319D63E1L, "1.2345678901234567E19", "12345678901234600000"), ExampleConversion(0x441AC53A7E04BCDAL, "1.2345678901234568E20", "1.23456789012346E+20"), ExampleConversion(0xC3E56A95319D63E1L, "-1.2345678901234567E19", "-12345678901234600000"), ExampleConversion(0xC41AC53A7E04BCDAL, "-1.2345678901234568E20", "-1.23456789012346E+20"), ExampleConversion(0x54820FE0BA17F46DL, "1.2345678901234577E99", "1.2345678901235E+99"), ExampleConversion(0x54B693D8E89DF188L, "1.2345678901234576E100", "1.2345678901235E+100"), ExampleConversion(0x4A611B0EC57E649AL, "2.0E50", "2E+50"), // range extremities ExampleConversion(0x7FEFFFFFFFFFFFFFL, "1.7976931348623157E308", "1.7976931348623E+308"), ExampleConversion(0x0010000000000000L, "2.2250738585072014E-308", "2.2250738585072E-308"), ExampleConversion(0x000FFFFFFFFFFFFFL, "2.225073858507201E-308", "0"), ExampleConversion(0x0000000000000001L, "4.9E-324", "0"), // infinity ExampleConversion(0x7FF0000000000000L, "Infinity", "1.7976931348623E+308"), ExampleConversion(0xFFF0000000000000L, "-Infinity", "1.7976931348623E+308"), // shortening due to rounding ExampleConversion(0x441AC7A08EAD02F2L, "1.234999999999999E20", "1.235E+20"), ExampleConversion(0x40FE26BFFFFFFFF9L, "123499.9999999999", "123500"), ExampleConversion(0x3E4A857BFB2F2809L, "1.234999999999999E-8", "0.00000001235"), ExampleConversion(0x3BCD291DEF868C89L, "1.234999999999999E-20", "1.235E-20"), // carry up due to rounding // For clarity these tests choose values that don't round in Java, // but will round in Excel. In some cases there is almost no difference // between Excel and Java (e.g. 9.9..9E-8) ExampleConversion(0x444B1AE4D6E2EF4FL, "9.999999999999999E20", "1E+21"), ExampleConversion(0x412E847FFFFFFFFFL, "999999.9999999999", "1000000"), ExampleConversion(0x3E45798EE2308C39L, "9.999999999999999E-9", "0.00000001"), ExampleConversion(0x3C32725DD1D243ABL, "9.999999999999999E-19", "0.000000000000000001"), ExampleConversion(0x3BFD83C94FB6D2ABL, "9.999999999999999E-20", "1E-19"), ExampleConversion(0xC44B1AE4D6E2EF4FL, "-9.999999999999999E20", "-1E+21"), ExampleConversion(0xC12E847FFFFFFFFFL, "-999999.9999999999", "-1000000"), ExampleConversion(0xBE45798EE2308C39L, "-9.999999999999999E-9", "-0.00000001"), ExampleConversion(0xBC32725DD1D243ABL, "-9.999999999999999E-19", "-0.000000000000000001"), ExampleConversion(0xBBFD83C94FB6D2ABL, "-9.999999999999999E-20", "-1E-19"), // NaNs // Currently these test cases are not critical, since other limitations prevent any variety in // or control of the bit patterns used to encode NaNs in evaluations. 
ExampleConversion(0xFFFF0420003C0000L, "NaN", "3.484840871308E+308"), ExampleConversion(0x7FF8000000000000L, "NaN", "2.6965397022935E+308"), ExampleConversion(0x7FFF0420003C0000L, "NaN", "3.484840871308E+308"), ExampleConversion(0xFFF8000000000000L, "NaN", "2.6965397022935E+308"), ExampleConversion(0xFFFF0AAAAAAAAAAAL, "NaN", "3.4877119413344E+308"), ExampleConversion(0x7FF80AAAAAAAAAAAL, "NaN", "2.7012211948322E+308"), ExampleConversion(0xFFFFFFFFFFFFFFFFL, "NaN", "3.5953862697246E+308"), ExampleConversion(0x7FFFFFFFFFFFFFFFL, "NaN", "3.5953862697246E+308"), ExampleConversion(0xFFF7FFFFFFFFFFFFL, "NaN", "2.6965397022935E+308") ) }
orbeon/orbeon-forms
form-runner/jvm/src/test/scala/org/orbeon/oxf/fr/excel/NumberToTextConversionExamples.scala
Scala
lgpl-2.1
15,195
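A quick sanity check one could run over the example table above (a sketch; it only restates the invariant already enforced by the ExampleConversion factory): every non-NaN row's Java rendering must parse back to exactly the decoded double.

import org.orbeon.oxf.fr.excel.NumberToTextConversionExamples.Examples

for (example <- Examples if ! example.isNaN)
  assert(
    example.javaRendering.toDouble == example.doubleValue,
    s"mismatch for ${example.javaRendering} (Excel renders it as ${example.excelRendering})"
  )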
package isabelle.eclipse.launch.tabs import org.eclipse.core.runtime.{IPath, IProgressMonitor, IStatus, Status} import org.eclipse.core.runtime.jobs.{ISchedulingRule, Job} import org.eclipse.debug.core.{ILaunchConfiguration, ILaunchConfigurationWorkingCopy} import org.eclipse.jface.dialogs.IDialogConstants import org.eclipse.jface.layout.{GridDataFactory, GridLayoutFactory} import org.eclipse.jface.resource.{JFaceResources, LocalResourceManager} import org.eclipse.jface.viewers.{ CheckStateChangedEvent, CheckboxTreeViewer, ICheckStateListener, TreeViewer } import org.eclipse.jface.wizard.ProgressMonitorPart import org.eclipse.swt.SWT import org.eclipse.swt.widgets.{Composite, Group} import org.eclipse.ui.dialogs.{FilteredTree, PatternFilter} import isabelle.eclipse.core.app.IsabelleBuild import isabelle.eclipse.launch.IsabelleLaunchPlugin import isabelle.eclipse.launch.config.{IsabelleLaunch, IsabelleLaunchConstants} import isabelle.eclipse.launch.config.LaunchConfigUtil.{configValue, resolvePath, setConfigValue} import AccessibleUtil.addControlAccessibleListener /** * A launch configuration component to select an Isabelle session (logic) in the * given Isabelle directory. * * Depends on Isabelle directory selection component. * * @author Andrius Velykis */ class SessionSelectComponent(isaPathObservable: ObservableValue[Option[String]], sessionDirsObservable: ObservableValue[Seq[String]], envMapObservable: ObservableValue[Map[String, String]], systemPropertiesObservable: ObservableValue[Map[String, String]]) extends LaunchComponent[Option[String]] { def attributeName = IsabelleLaunchConstants.ATTR_SESSION private var sessionCheck = new SingleCheckStateProvider[CheckboxTreeViewer] private var progressMonitorPart: ProgressMonitorPart = _ private var container: LaunchComponentContainer = _ private var lastFinishedJob: Option[SessionLoadJob] = None private var sessionLoadJob: Option[SessionLoadJob] = None private var lastLoadError: Option[IStatus] = None /** * Creates the controls needed to select logic for the Isabelle installation. 
*/ override def createControl(parent: Composite, container: LaunchComponentContainer) { this.container = container val group = new Group(parent, SWT.NONE) group.setText("&Session:") group.setLayout(GridLayoutFactory.swtDefaults.create) group.setLayoutData(GridDataFactory.fillDefaults.grab(true, true).create) group.setFont(parent.getFont) val filteredSessionsViewer = new SessionFilteredTree(group, SWT.BORDER) val sessionsViewer = filteredSessionsViewer.getViewer addControlAccessibleListener(sessionsViewer.getControl, group.getText) val monitorComposite = new Composite(group, SWT.NONE) monitorComposite.setLayout(GridLayoutFactory.fillDefaults.numColumns(2).create) monitorComposite.setLayoutData(GridDataFactory.fillDefaults.grab(true, false).create) progressMonitorPart = new ProgressMonitorPart(monitorComposite, GridLayoutFactory.fillDefaults.create, false) progressMonitorPart.setLayoutData(GridDataFactory.fillDefaults.grab(true, false).create) progressMonitorPart.setFont(parent.getFont) monitorComposite.setVisible(false) // on config change in Isabelle path, update the session selection // (only do after UI initialisation) isaPathObservable subscribe sessionLocsChanged // the same for session dirs change sessionDirsObservable subscribe sessionLocsChanged envMapObservable subscribe sessionLocsChanged } private def createCheckboxTreeViewer(parent: Composite, style: Int): CheckboxTreeViewer = { val sessionsViewer = new CheckboxTreeViewer(parent, SWT.CHECK | SWT.SINGLE | SWT.FULL_SELECTION | style) sessionsViewer.getControl.setLayoutData(GridDataFactory.fillDefaults. grab(true, true).hint(IDialogConstants.ENTRY_FIELD_WIDTH, 50).create) val resourceManager = new LocalResourceManager( JFaceResources.getResources, sessionsViewer.getControl) sessionsViewer.setLabelProvider(new SessionLabelProvider(resourceManager)) sessionsViewer.setContentProvider(new ArrayTreeContentProvider) sessionCheck.initViewer(sessionsViewer) sessionsViewer.setCheckStateProvider(sessionCheck) sessionsViewer.setInput(Array()) sessionsViewer.addCheckStateListener(new ICheckStateListener { override def checkStateChanged(event: CheckStateChangedEvent) = configModified() }) sessionsViewer } override def initializeFrom(configuration: ILaunchConfiguration) { val sessionName = configValue(configuration, attributeName, "") reloadAvailableSessions(Some(configuration)) selectedSession = if (sessionName.isEmpty) None else Some(sessionName) } override def value = selectedSession private def selectedSession: Option[String] = { sessionCheck.checked map (_.toString) } private def selectedSession_= (value: Option[String]): Unit = { sessionCheck.checked = value } private def sessionLocsChanged() = reloadAvailableSessions() private def reloadAvailableSessions(configuration: Option[ILaunchConfiguration] = None) { val isaPath = isaPathObservable.value // if there is a config available, read environment map from it, otherwise ask // the observable (the observable may be uninitialised) val configEnvMap = configuration.map(conf => IsabelleLaunch.environmentMap(conf).right.toOption).flatten val envMap = configEnvMap getOrElse envMapObservable.value val systemProps = systemPropertiesObservable.value // same for more dirs (observable may be uninitialised) val configMoreDirs = configuration.map(conf => configValue(conf, IsabelleLaunchConstants.ATTR_SESSION_DIRS, List[String]())) val moreDirs = configMoreDirs getOrElse sessionDirsObservable.value val resolvedDirs = moreDirs map resolvePath // allow only valid session dirs to avoid crashing the session 
lookup val moreDirsSafe = resolvedDirs filter IsabelleBuild.isSessionDir isaPath match { case None => { sessionLoadJob = None finishedLoadingSessions(None, Right(Nil), false) } case Some(path) => { val newLoadJob = Some(SessionLoadJob(path, moreDirsSafe, envMap, systemProps)) if (lastFinishedJob == newLoadJob) { // same job, avoid reloading sessionLoadJob = None } else { progressMonitorPart.beginTask("Loading available sessions...", IProgressMonitor.UNKNOWN) progressMonitorPart.getParent.setVisible(true) sessionLoadJob = newLoadJob sessionLoadJob.get.schedule() } } } } private case class SessionLoadJob(isaPath: String, moreDirs: Seq[IPath], envMap: Map[String, String], systemProperties: Map[String, String]) extends Job("Loading available sessions...") { // avoid parallel loads using the sync rule setRule(syncLoadRule) override protected def run(monitor: IProgressMonitor): IStatus = { val sessionLoad = IsabelleLaunch.availableSessions( isaPath, moreDirs, envMap, systemProperties) SWTUtil.asyncUnlessDisposed(Option(sessionCheck.viewer.getControl)) { finishedLoadingSessions(Some(this), sessionLoad, true) } // always return OK to avoid jarring error messages in UI - the error is reported // by logging here and in #finishedLoadingSessions() then #isValid() // sessionLoad fold ( err => err, success => Status.OK_STATUS ) sessionLoad.left foreach IsabelleLaunchPlugin.log Status.OK_STATUS } } lazy val syncLoadRule = new ISchedulingRule { def contains(rule: ISchedulingRule) = rule == this def isConflicting(rule: ISchedulingRule) = rule == this } private def finishedLoadingSessions(loadJob: Option[SessionLoadJob], sessionsEither: Either[IStatus, List[String]], callback: Boolean) = if (sessionLoadJob == loadJob && !sessionCheck.viewer.getControl.isDisposed) { // correct loading job and config still open val sessions = sessionsEither.right getOrElse Nil val currentSelection = selectedSession // if the previously selected session is available, keep the selection // otherwise, reset it or select a sensible default val newSelection = (selectedSession, sessions) match { case (_, Nil) => None case (Some(selected), ss) if ss.contains(selected) => Some(selected) // if only one session available, select it // TODO suggest some default value, e.g. HOL? 
case (None, first :: Nil) => Some(first) case _ => None } sessionCheck.viewer.setInput(sessions.toArray) selectedSession = newSelection sessionLoadJob = None lastFinishedJob = loadJob lastLoadError = sessionsEither.left.toOption progressMonitorPart.getParent.setVisible(false) progressMonitorPart.done() if (callback) { container.update() } } override def performApply(configuration: ILaunchConfigurationWorkingCopy) { setConfigValue(configuration, attributeName, selectedSession) } override def isValid(configuration: ILaunchConfiguration, newConfig: Boolean): Option[Either[String, String]] = if (sessionLoadJob.isDefined) { // still have not finished the loading job, cannot validate Some(Left("Loading available Isabelle logics for selection...")) } else if (lastLoadError.isDefined) { Some(Left(lastLoadError.get.getMessage)) } else if (sessionCheck.viewer.getTree.getItemCount == 0) { Some(Left("There are no Isabelle logics available in the indicated location")) } else selectedSession match { // found selection - no errors case Some(session) => None // either urge to select for new config, or report error case None => if (newConfig) { Some(Right("Please select an Isabelle logic for the indicated location")) } else { Some(Left("Isabelle logic must be selected")) } } // notify listeners private def configModified() = publish() /** * A FilteredTree with sessions checkbox tree viewer as main control */ private class SessionFilteredTree(parent: Composite, treeStyle: Int) extends FilteredTree(parent, treeStyle, new PatternFilter(), true) { override protected def doCreateTreeViewer(parent: Composite, style: Int): TreeViewer = createCheckboxTreeViewer(parent, style) } }
andriusvelykis/isabelle-eclipse
isabelle.eclipse.launch/src/isabelle/eclipse/launch/tabs/SessionSelectComponent.scala
Scala
epl-1.0
10,990
package lila.rating case class RatingRange(min: Int, max: Int) { def contains(rating: Int) = rating >= min && rating <= max def notBroad: Option[RatingRange] = (this != RatingRange.broad) option this override def toString = "%d-%d".format(min, max) } object RatingRange { val min = 800 val max = 2900 val broad = RatingRange(min, max) val default = broad // ^\\d{3,4}\\-\\d{3,4}$ def apply(from: String): Option[RatingRange] = for { min ← parseIntOption(from takeWhile ('-' !=)) if acceptable(min) max ← parseIntOption(from dropWhile ('-' !=) tail) if acceptable(max) if min <= max } yield RatingRange(min, max) def orDefault(from: String) = apply(from) | default def noneIfDefault(from: String) = apply(from) filter (_ != default) def valid(from: String) = apply(from).isDefined private def acceptable(rating: Int) = broad contains rating }
bjhaid/lila
modules/rating/src/main/RatingRange.scala
Scala
mit
903
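Typical use of the RatingRange companion above, as a sketch with arbitrary values: the string-based apply returns an Option and rejects anything outside the 800-2900 bounds or with min greater than max.

import lila.rating.RatingRange

val ok       = RatingRange("1200-1800")       // Some(RatingRange(1200, 1800))
val rejected = RatingRange("100-5000")        // None: both bounds must lie within 800-2900
val fallback = RatingRange.orDefault("oops")  // unparseable input falls back to the broad 800-2900 range

ok.exists(_.contains(1500))                   // true
RatingRange.valid("1800-1200")                // false: min must not exceed max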
package com.cloudray.scalapress.media import java.io.InputStream import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component import org.springframework.web.multipart.MultipartFile import com.cloudray.scalapress.framework.ScalapressContext /** @author Stephen Samuel */ @Component @Autowired class AssetService(assetStore: AssetStore, context: ScalapressContext) { /** Removes the given key from the asset store, or does nothing if the key does not exist */ def delete(key: String): Unit = assetStore.delete(key) /** Returns an Asset class for the given key. */ def asset(key: String): Asset = assetStore.toAsset(key) /** Adds the given multipart files to the asset store using the original * filenames as the keys. If a key is already in use then it will modify * the key so that it is unique. * * This method will silently ignore any files that have zero byte content. * * @return the keys that the assets were stored under */ def upload(files: Seq[MultipartFile]): Seq[String] = files.filterNot(_.isEmpty).map(upload) /** Adds the given multipart file to the asset store using the original * filename as the key. If the key is already in use then it will modify * the key so that it is unique. * * @return the key that the asset was stored under */ def upload(file: MultipartFile): String = { val key = file.getOriginalFilename val in = file.getInputStream add(key, in) } /** Adds the given stream under a unique key. The given key is used as a hint and may * not be the actual key used. This method guarantees not to overwrite any existing asset. * * @return the key that the asset was stored under */ def add(key: String, input: InputStream): String = { val asset = adapt(key, input) assetStore.add(asset._1, asset._2) } /** Adds the given stream to the asset store, overriding any existing asset that * is stored under the same key. This method guarantees to use the key provided. */ def put(key: String, input: InputStream) { val asset = adapt(key, input) assetStore.put(asset._1, asset._2) } private def adapt(asset: (String, InputStream)): (String, InputStream) = { val listeners = context.beans[AssetLifecycleListener] val op = (a: (String, InputStream), b: AssetLifecycleListener) => b.onStore(a._1, a._2) listeners.foldLeft(asset)(op) } }
vidyacraghav/scalapress
src/main/scala/com/cloudray/scalapress/media/AssetService.scala
Scala
apache-2.0
2,482
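A sketch of calling the AssetService API above from client code (the service instance is normally supplied by Spring's autowiring; the key and payload here are hypothetical):

import java.io.ByteArrayInputStream
import com.cloudray.scalapress.media.AssetService

def storeLogo(assetService: AssetService): String = {
  val payload = new ByteArrayInputStream("not really an image".getBytes("UTF-8"))
  // `add` treats the key as a hint and may return a different, unique key;
  // use `put` instead when the exact key must be kept and overwriting is acceptable.
  assetService.add("logo.png", payload)
}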
package com.datawizards.sparklocal.rdd.pair import com.datawizards.sparklocal.SparkLocalBaseTest import com.datawizards.sparklocal.rdd.RDDAPI import org.apache.spark.HashPartitioner import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class CogroupTest extends SparkLocalBaseTest { val data = Seq((1,11),(1,12),(2,22),(3,33),(3,34),(4,44)) val other1 = Seq((1,"a1"), (1,"a2"), (2,"b")) val other2 = Seq((3,30.0), (4,40.0)) val other3 = Seq((2,-2),(4,-4)) test("cogroup(other) result") { assertRDDOperationResultWithSorted(RDDAPI(data).cogroup(RDDAPI(other1))) { Array( (1,(Iterable(11,12),Iterable("a1","a2"))), (2,(Iterable(22),Iterable("b"))), (3,(Iterable(33,34),Iterable.empty)), (4,(Iterable(44),Iterable.empty)) ) } } test("cogroup(other1,other2) result") { assertRDDOperationResultWithSorted(RDDAPI(data).cogroup(RDDAPI(other1),RDDAPI(other2))) { Array( (1,(Iterable(11,12),Iterable("a1","a2"),Iterable.empty)), (2,(Iterable(22),Iterable("b"),Iterable.empty)), (3,(Iterable(33,34),Iterable.empty,Iterable(30.0))), (4,(Iterable(44),Iterable.empty,Iterable(40.0))) ) } } test("cogroup(other1,other2,other3) result") { assertRDDOperationResultWithSorted(RDDAPI(data).cogroup(RDDAPI(other1),RDDAPI(other2),RDDAPI(other3))) { Array( (1,(Iterable(11,12),Iterable("a1","a2"),Iterable.empty,Iterable.empty)), (2,(Iterable(22),Iterable("b"),Iterable.empty,Iterable(-2))), (3,(Iterable(33,34),Iterable.empty,Iterable(30.0),Iterable.empty)), (4,(Iterable(44),Iterable.empty,Iterable(40.0),Iterable(-4))) ) } } test("cogroup(other) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1)) } } test("cogroup(other) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd) } } test("cogroup(other1,other2) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1),RDDAPI(other2)) } } test("cogroup(other1,other2) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd,RDDAPI(other2)) } } test("cogroup(other1,other2,other3) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1),RDDAPI(other2),RDDAPI(other3)) } } test("cogroup(other1,other2,other3) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd,RDDAPI(other2),RDDAPI(other3)) } } test("cogroup(other,numPartitions) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1), 2) } } test("cogroup(other,numPartitions) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd, 2) } } test("cogroup(other1,other2,numPartitions) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1),RDDAPI(other2), 2) } } test("cogroup(other1,other2,numPartitions) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd,RDDAPI(other2), 2) } } test("cogroup(other1,other2,other3,numPartitions) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1),RDDAPI(other2),RDDAPI(other3), 2) } } test("cogroup(other1,other2,other3,numPartitions) equal - Spark cogroup Scala") { 
assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd,RDDAPI(other2),RDDAPI(other3), 2) } } test("cogroup(other,partitioner) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1), new HashPartitioner(2)) } } test("cogroup(other,partitioner) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd, new HashPartitioner(2)) } } test("cogroup(other1,other2,partitioner) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1),RDDAPI(other2), new HashPartitioner(2)) } } test("cogroup(other1,other2,partitioner) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd,RDDAPI(other2), new HashPartitioner(2)) } } test("cogroup(other1,other2,other3,partitioner) equal - Scala cogroup Spark") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => rdd.cogroup(RDDAPI(other1),RDDAPI(other2),RDDAPI(other3), new HashPartitioner(2)) } } test("cogroup(other1,other2,other3,partitioner) equal - Spark cogroup Scala") { assertRDDOperationReturnsSameResultWithSorted(data){ rdd => RDDAPI(other1).cogroup(rdd,RDDAPI(other2),RDDAPI(other3), new HashPartitioner(2)) } } }
piotr-kalanski/spark-local
src/test/scala/com/datawizards/sparklocal/rdd/pair/CogroupTest.scala
Scala
apache-2.0
5,433
package dtc import java.time.{LocalDate, LocalTime} import simulacrum.typeclass import scala.language.implicitConversions /** * Provides a way to capture a `java.time`-based instant into a value of type A. */ @typeclass trait Capture[A] { /** * Java-time-based constructor for values of type A. * The resulting time point will match the specified local date and time at the specified zone. * * @param date local date part of A * @param time local time part of A * @param zone time zone context for the specified local date and time */ def capture(date: LocalDate, time: LocalTime, zone: TimeZoneId): A } object Capture { def apply[A](date: LocalDate, time: LocalTime, zone: TimeZoneId)(implicit C: Capture[A]): A = C.capture(date, time, zone) }
vpavkin/dtc
core/shared/src/main/scala/dtc/Capture.scala
Scala
apache-2.0
776
/* * Copyright (C) Lightbend Inc. <https://www.lightbend.com> */ package play.it.action import akka.stream.scaladsl.Source import play.shaded.ahc.io.netty.handler.codec.http.HttpHeaders import org.specs2.mutable.Specification import play.api.http.HeaderNames._ import play.api.http.Status._ import play.api.libs.ws.WSClient import play.api.libs.ws.WSResponse import play.api.mvc._ import play.api.routing.Router.Routes import play.api.routing.sird._ import play.api.test._ import play.core.server.Server import play.it._ import play.it.tools.HttpBinApplication._ import scala.concurrent.ExecutionContext.Implicits.global import play.shaded.ahc.org.asynchttpclient.netty.NettyResponse import play.api.libs.typedmap.TypedKey import scala.concurrent.Future class NettyHeadActionSpec extends HeadActionSpec with NettyIntegrationSpecification class AkkaHttpHeadActionSpec extends HeadActionSpec with AkkaHttpIntegrationSpecification trait HeadActionSpec extends Specification with FutureAwaits with DefaultAwaitTimeout with ServerIntegrationSpecification { sequential "HEAD requests" should { def webSocketResponse(implicit Action: DefaultActionBuilder): Routes = { case GET(p"/ws") => WebSocket.acceptOrResult[String, String] { request => Future.successful(Left(Results.Forbidden)) } } def chunkedResponse(implicit Action: DefaultActionBuilder): Routes = { case GET(p"/chunked") => Action { request => Results.Ok.chunked(Source(List("a", "b", "c"))) } } def routes(implicit Action: DefaultActionBuilder) = get // GET /get .orElse(patch) // PATCH /patch .orElse(post) // POST /post .orElse(put) // PUT /put .orElse(delete) // DELETE /delete .orElse(stream) // GET /stream/0 .orElse(chunkedResponse) // GET /chunked .orElse(webSocketResponse) // GET /ws def withServer[T](block: WSClient => T): T = { // Routes from HttpBinApplication Server.withRouterFromComponents()(components => routes(components.defaultActionBuilder)) { implicit port => WsTestClient.withClient(block) } } def serverWithHandler[T](handler: Handler)(block: WSClient => T): T = { Server.withRouter() { case _ => handler } { implicit port => WsTestClient.withClient(block) } } "return 400 in response to a HEAD in a WebSocket handler" in withServer { client => val result = await(client.url("/ws").head()) result.status must_== BAD_REQUEST } "return 200 in response to a URL with a GET handler" in withServer { client => val result = await(client.url("/get").head()) result.status must_== OK } "return an empty body" in withServer { client => val result = await(client.url("/get").head()) result.body.length must_== 0 } "match the headers of an equivalent GET" in withServer { client => val collectedFutures = for { headResponse <- client.url("/get").head() getResponse <- client.url("/get").get() } yield List(headResponse, getResponse) val responses = await(collectedFutures) val headHeaders = responses(0).underlying[NettyResponse].getHeaders val getHeaders: HttpHeaders = responses(1).underlying[NettyResponse].getHeaders // Exclude `Date` header because it can vary between requests import scala.collection.JavaConverters._ val firstHeaders = headHeaders.remove(DATE) val secondHeaders = getHeaders.remove(DATE) // HTTPHeaders doesn't seem to be anything as simple as an equals method, so let's compare A !< B && B >! 
A val notInFirst = secondHeaders.asScala.collectFirst { case entry if !firstHeaders.contains(entry.getKey, entry.getValue, true) => entry } val notInSecond = firstHeaders.asScala.collectFirst { case entry if !secondHeaders.contains(entry.getKey, entry.getValue, true) => entry } notInFirst must beEmpty notInSecond must beEmpty } "return 404 in response to a URL without an associated GET handler" in withServer { client => val collectedFutures = for { putRoute <- client.url("/put").head() patchRoute <- client.url("/patch").head() postRoute <- client.url("/post").head() deleteRoute <- client.url("/delete").head() } yield List(putRoute, patchRoute, postRoute, deleteRoute) val responseList = await(collectedFutures) foreach(responseList)((_: WSResponse).status must_== NOT_FOUND) } val CustomAttr = TypedKey[String]("CustomAttr") val attrAction = ActionBuilder.ignoringBody { (rh: RequestHeader) => val attrComment = rh.attrs.get(CustomAttr) val headers = Array.empty[(String, String)] ++ rh.attrs.get(CustomAttr).map("CustomAttr" -> _) Results.Ok.withHeaders(headers: _*) } "modify request with DefaultHttpRequestHandler" in serverWithHandler( Handler.Stage.modifyRequest( (rh: RequestHeader) => rh.addAttr(CustomAttr, "y"), attrAction ) ) { client => val result = await(client.url("/get").head()) result.status must_== OK result.header("CustomAttr") must beSome("y") } "omit Content-Length for chunked responses" in withServer { client => val response = await(client.url("/chunked").head()) response.body must_== "" response.header(CONTENT_LENGTH) must beNone } "Keep Content-Length for streamed responses" in withServer { client => val response = await(client.url("/stream/10").head()) response.body must_== "" response.header(CONTENT_LENGTH) must beSome("10") } } }
marcospereira/playframework
core/play-integration-test/src/it/scala/play/it/action/HeadActionSpec.scala
Scala
apache-2.0
5,874
package example import org.scalatest.FunSuite import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner /** * This class implements a ScalaTest test suite for the methods in object * `Lists` that need to be implemented as part of this assignment. A test * suite is simply a collection of individual tests for some specific * component of a program. * * A test suite is created by defining a class which extends the type * `org.scalatest.FunSuite`. When running ScalaTest, it will automatically * find this class and execute all of its tests. * * Adding the `@RunWith` annotation enables the test suite to be executed * inside eclipse using the built-in JUnit test runner. * * You have two options for running this test suite: * * - Start the sbt console and run the "test" command * - Right-click this file in eclipse and chose "Run As" - "JUnit Test" */ @RunWith(classOf[JUnitRunner]) class ListsSuite extends FunSuite { /** * Tests are written using the `test` operator which takes two arguments: * * - A description of the test. This description has to be unique, no two * tests can have the same description. * - The test body, a piece of Scala code that implements the test * * The most common way to implement a test body is using the method `assert` * which tests that its argument evaluates to `true`. So one of the simplest * successful tests is the following: */ test("one plus one is two")(assert(1 + 1 == 2)) /** * In Scala, it is allowed to pass an argument to a method using the block * syntax, i.e. `{ argument }` instead of parentheses `(argument)`. * * This allows tests to be written in a more readable manner: */ test("one plus one is three?") { assert(1 + 1 === 2) // This assertion fails! Go ahead and fix it. } /** * One problem with the previous (failing) test is that ScalaTest will * only tell you that a test failed, but it will not tell you what was * the reason for the failure. The output looks like this: * * {{{ * [info] - one plus one is three? *** FAILED *** * }}} * * This situation can be improved by using a special equality operator * `===` instead of `==` (this is only possible in ScalaTest). So if you * run the next test, ScalaTest will show the following output: * * {{{ * [info] - details why one plus one is not three *** FAILED *** * [info] 2 did not equal 3 (ListsSuite.scala:67) * }}} * * We recommend to always use the `===` equality operator when writing tests. */ test("details why one plus one is not three") { assert(1 + 1 === 2) // Fix me, please! } /** * In order to test the exceptional behavior of a methods, ScalaTest offers * the `intercept` operation. * * In the following example, we test the fact that the method `intNotZero` * throws an `IllegalArgumentException` if its argument is `0`. */ test("intNotZero throws an exception if its argument is 0") { intercept[IllegalArgumentException] { intNotZero(0) } } def intNotZero(x: Int): Int = { if (x == 0) throw new IllegalArgumentException("zero is not allowed") else x } /** * Now we finally write some tests for the list functions that have to be * implemented for this assignment. We fist import all members of the * `List` object. */ import Lists._ /** * We only provide two very basic tests for you. Write more tests to make * sure your `sum` and `max` methods work as expected. * * In particular, write tests for corner cases: negative numbers, zeros, * empty lists, lists with repeated elements, etc. 
* * It is allowed to have multiple `assert` statements inside one test, * however it is recommended to write an individual `test` statement for * every tested aspect of a method. */ test("sum of a few numbers") { assert(sum(List(1,2,0)) === 3) } test("sum of empty list") { assert(sum(List()) === 0) } test("sum of list with three same elements") { assert(sum(List(3,3,3)) === 9) } test("sum of negative and positive numbers which makes result 0") { assert(sum(List(3,-3)) === 0) } test("max of a few numbers") { assert(max(List(3, 7, 2)) === 7) } test("max of one number") { assert(max(List(1)) === 1) } test("max of negative numbers") { assert(max(List(-7, -3, -5)) === -3) } }
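// ---------------------------------------------------------------------------
// Illustrative additions (not part of the original assignment solution): two
// more corner cases of the kind the comments above ask for. Both assertions
// hold for any correct `sum`/`max`, so nothing beyond the assignment spec is
// assumed; the suite name is hypothetical.
// ---------------------------------------------------------------------------
@RunWith(classOf[JUnitRunner])
class ListsCornerCaseSuite extends FunSuite {

  import Lists._

  test("sum of a list containing only zeros") {
    assert(sum(List(0, 0, 0)) === 0)
  }

  test("max of a list with repeated elements") {
    assert(max(List(2, 2, 2)) === 2)
  }
}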
spolnik/scala-workspace
scala-learning/example/src/test/scala/example/ListsSuite.scala
Scala
apache-2.0
4,448
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dataset import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.utils.T import org.scalatest.{FlatSpec, Matchers} class MiniBatchSpec extends FlatSpec with Matchers { "TensorMiniBatch size" should "return right result" in { val a = Tensor[Float](3, 4).range(1, 12, 1) val b = Tensor[Float](3).range(1, 3, 1) val miniBatch = MiniBatch(a, b) miniBatch.size() should be (3) } "TensorMiniBatch getInput/target" should "return right result" in { val a = Tensor[Float](3, 4).range(1, 12, 1) val b = Tensor[Float](3).range(1, 3, 1) val miniBatch = MiniBatch(a, b) miniBatch.getInput() should be (a) miniBatch.getTarget() should be (b) } "TensorMiniBatch slice" should "return right result" in { val a = Tensor[Float](3, 4).range(1, 12, 1) val b = Tensor[Float](3).range(1, 3, 1) val miniBatch = MiniBatch(a, b) miniBatch.slice(1, 1).getInput() should be (Tensor[Float](1, 4).range(1, 4, 1)) miniBatch.slice(2, 1).getInput() should be (Tensor[Float](1, 4).range(5, 8, 1)) miniBatch.slice(3, 1).getInput() should be (Tensor[Float](1, 4).range(9, 12, 1)) miniBatch.slice(1, 1).getTarget() should be (Tensor[Float](1).fill(1)) miniBatch.slice(2, 1).getTarget() should be (Tensor[Float](1).fill(2)) miniBatch.slice(3, 1).getTarget() should be (Tensor[Float](1).fill(3)) } "ArrayTensorMiniBatch size" should "return right result" in { val a1 = Tensor[Float](3, 4).range(1, 12, 1) val a2 = Tensor[Float](3, 2).range(1, 6, 1) val b = Tensor[Float](3).range(1, 3, 1) val miniBatch = MiniBatch(Array(a1, a2), b) miniBatch.size() should be (3) } "ArrayTensorMiniBatch getInput/target" should "return right result" in { val a1 = Tensor[Float](3, 4).range(1, 12, 1) val a2 = Tensor[Float](3, 2).range(1, 6, 1) val b = Tensor[Float](3).range(1, 3, 1) val miniBatch = MiniBatch(Array(a1, a2), b) miniBatch.getInput() should be (T(a1, a2)) miniBatch.getTarget() should be (b) } "ArrayTensorMiniBatch slice" should "return right result" in { val a1 = Tensor[Float](3, 2, 2).range(1, 12, 1) val a2 = Tensor[Float](3, 2).range(1, 6, 1) val b = Tensor[Float](3).range(1, 3, 1) val miniBatch = MiniBatch(Array(a1, a2), b) miniBatch.slice(1, 1).getInput() should be (T(Tensor[Float](1, 2, 2).range(1, 4, 1), Tensor[Float](1, 2).range(1, 2, 1))) miniBatch.slice(2, 1).getInput() should be (T(Tensor[Float](1, 2, 2).range(5, 8, 1), Tensor[Float](1, 2).range(3, 4, 1))) miniBatch.slice(3, 1).getInput() should be (T(Tensor[Float](1, 2, 2).range(9, 12, 1), Tensor[Float](1, 2).range(5, 6, 1))) miniBatch.slice(1, 1).getTarget() should be (Tensor[Float](1).fill(1)) miniBatch.slice(2, 1).getTarget() should be (Tensor[Float](1).fill(2)) miniBatch.slice(3, 1).getTarget() should be (Tensor[Float](1).fill(3)) } }
JerryYanWan/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/dataset/MiniBatchSpec.scala
Scala
apache-2.0
3,547
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.connector.write private[sql] case class PhysicalWriteInfoImpl(numPartitions: Int) extends PhysicalWriteInfo
maropu/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/connector/write/PhysicalWriteInfoImpl.scala
Scala
apache-2.0
940
package demo.akka.mapreduce.actor

import akka.actor.{Actor, ActorRef, actorRef2Scala}
import demo.akka.mapreduce.data.{MapData, ReduceData, WordCount}

import scala.collection.mutable.{ArrayBuffer, HashMap}

/** Reduce step of the word-count demo: collapses the mapped [[WordCount]] entries
  * into per-word occurrence counts and forwards the result to the aggregate actor.
  */
class ReduceActor(aggregateActor: ActorRef) extends Actor {

  def receive: Receive = {
    case message: MapData => aggregateActor ! reduce(message.dataList)
    case _ =>
  }

  /** Counts how many times each word occurs in the mapped data. */
  def reduce(dataList: ArrayBuffer[WordCount]): ReduceData = {
    val reducedMap = HashMap[String, Int]()
    for (wc <- dataList) {
      reducedMap.put(wc.word, reducedMap.getOrElse(wc.word, 0) + 1)
    }
    new ReduceData(reducedMap)
  }
}
jianwu-github/akka-mapreduce-demo
src/main/scala/demo/akka/mapreduce/actor/ReduceActor.scala
Scala
cc0-1.0
882
package com.wavesplatform.api.http.requests import com.wavesplatform.account.PublicKey import com.wavesplatform.lang.ValidationError import com.wavesplatform.transaction.Proofs import com.wavesplatform.transaction.lease.LeaseCancelTransaction import play.api.libs.functional.syntax._ import play.api.libs.json._ case class SignedLeaseCancelV1Request( senderPublicKey: String, leaseId: String, timestamp: Long, signature: String, fee: Long ) { def toTx: Either[ValidationError, LeaseCancelTransaction] = for { _sender <- PublicKey.fromBase58String(senderPublicKey) _signature <- parseBase58(signature, "invalid.signature", SignatureStringLength) _leaseTx <- parseBase58(leaseId, "invalid.leaseTx", SignatureStringLength) _t <- LeaseCancelTransaction.create(1.toByte, _sender, _leaseTx, fee, timestamp, Proofs(_signature)) } yield _t } object SignedLeaseCancelV1Request { implicit val reads: Reads[SignedLeaseCancelV1Request] = ( (JsPath \ "senderPublicKey").read[String] and (JsPath \ "txId").read[String].orElse((JsPath \ "leaseId").read[String]) and (JsPath \ "timestamp").read[Long] and (JsPath \ "signature").read[String] and (JsPath \ "fee").read[Long] )(SignedLeaseCancelV1Request.apply _) implicit val writes: Writes[SignedLeaseCancelV1Request] = Json.writes[SignedLeaseCancelV1Request] }
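// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the Reads above accepts
// either "txId" or "leaseId" for the lease reference. The JSON literal and the
// base58 strings below are placeholders, not valid keys, ids or signatures.
// ---------------------------------------------------------------------------
object SignedLeaseCancelV1RequestParseSketch {

  // Parsing a payload that uses the legacy "txId" field name.
  def parseExample: SignedLeaseCancelV1Request =
    Json
      .parse(
        """{
          |  "senderPublicKey": "<base58 public key>",
          |  "txId": "<base58 lease id>",
          |  "timestamp": 1,
          |  "signature": "<base58 signature>",
          |  "fee": 100000
          |}""".stripMargin
      )
      .as[SignedLeaseCancelV1Request]
}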
wavesplatform/Waves
node/src/test/scala/com/wavesplatform/api/http/requests/SignedLeaseCancelV1Request.scala
Scala
mit
1,400
package org.bitcoins.core.gcs import org.bitcoins.core.crypto.DoubleSha256Digest import org.bitcoins.core.number.{UInt64, UInt8} import org.bitcoins.core.protocol.{CompactSizeUInt, NetworkElement} import org.bitcoins.core.protocol.blockchain.Block import org.bitcoins.core.protocol.script.{EmptyScriptPubKey, ScriptPubKey} import org.bitcoins.core.protocol.transaction.{ Transaction, TransactionInput, TransactionOutPoint, TransactionOutput } import org.bitcoins.core.script.control.OP_RETURN import org.bitcoins.core.util.{BitcoinSUtil, CryptoUtil} import scodec.bits.{BitVector, ByteVector} import scala.annotation.tailrec /** * Represents a GCS encoded set with all parameters specified * @see [[https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki#golomb-coded-sets]] * * TODO: Replace ByteVector with a type for keys */ case class GolombFilter( key: SipHashKey, m: UInt64, p: UInt8, n: CompactSizeUInt, encodedData: BitVector) extends NetworkElement { lazy val decodedHashes: Vector[UInt64] = GCS.golombDecodeSet(encodedData, p) /** The hash of this serialized filter */ lazy val hash: DoubleSha256Digest = { CryptoUtil.doubleSHA256(this.bytes) } /** Given the previous FilterHeader, constructs the header corresponding to this */ def getHeader(prevHeader: FilterHeader): FilterHeader = { FilterHeader(filterHash = this.hash, prevHeaderHash = prevHeader.hash) } /** Given the previous FilterHeader hash, constructs the header corresponding to this */ def getHeader(prevHeaderHash: DoubleSha256Digest): FilterHeader = { FilterHeader(filterHash = this.hash, prevHeaderHash = prevHeaderHash) } override def bytes: ByteVector = { n.bytes ++ encodedData.bytes } // TODO: Offer alternative that stops decoding when it finds out if data is there def matchesHash(hash: UInt64): Boolean = { @tailrec def binarySearch( from: Int, to: Int, hash: UInt64, set: Vector[UInt64]): Boolean = { if (to < from) { false } else { val index = (to + from) / 2 val otherHash = set(index) if (hash == otherHash) { true } else if (hash < otherHash) { binarySearch(from, index - 1, hash, set) } else { binarySearch(index + 1, to, hash, set) } } } binarySearch(from = 0, to = n.toInt - 1, hash, decodedHashes) } def matches(data: ByteVector): Boolean = { val f = n.num * m val hash = GCS.hashToRange(data, f, key) matchesHash(hash) } } object BlockFilter { /** * Returns all ScriptPubKeys from a Block's outputs that are relevant * to BIP 158 Basic Block Filters * @see [[https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki#contents]] */ def getOutputScriptPubKeysFromBlock(block: Block): Vector[ScriptPubKey] = { val transactions: Vector[Transaction] = block.transactions.toVector val newOutputs: Vector[TransactionOutput] = transactions.flatMap(_.outputs) newOutputs .filterNot(_.scriptPubKey.asm.contains(OP_RETURN)) .filterNot(_.scriptPubKey == EmptyScriptPubKey) .map(_.scriptPubKey) } /** * Returns all ScriptPubKeys from a Block's inputs that are relevant * to BIP 158 Basic Block Filters * @see [[https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki#contents]] */ def getInputScriptPubKeysFromBlock( block: Block, utxoProvider: TempUtxoProvider): Vector[ScriptPubKey] = { val transactions: Vector[Transaction] = block.transactions.toVector val noCoinbase: Vector[Transaction] = transactions.tail val inputs: Vector[TransactionInput] = noCoinbase.flatMap(_.inputs) val outpointsSpent: Vector[TransactionOutPoint] = inputs.map(_.previousOutput) val prevOutputs: Vector[TransactionOutput] = outpointsSpent.flatMap(utxoProvider.getUtxo) 
prevOutputs .filterNot(_.scriptPubKey == EmptyScriptPubKey) .map(_.scriptPubKey) } /** * Given a Block and access to the UTXO set, constructs a Block Filter for that block * @see [[https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki#block-filters]] */ def apply(block: Block, utxoProvider: TempUtxoProvider): GolombFilter = { val prevOutputScripts: Vector[ScriptPubKey] = getInputScriptPubKeysFromBlock(block, utxoProvider) BlockFilter(block, prevOutputScripts) } /** * Given a Block and access to the previous output scripts, constructs a Block Filter for that block * @see [[https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki#block-filters]] */ def apply( block: Block, prevOutputScripts: Vector[ScriptPubKey]): GolombFilter = { val keyBytes: ByteVector = block.blockHeader.hash.bytes.take(16) val key: SipHashKey = SipHashKey(keyBytes) val newScriptPubKeys: Vector[ByteVector] = getOutputScriptPubKeysFromBlock(block).map(_.asmBytes) val prevOutputScriptBytes: Vector[ByteVector] = prevOutputScripts .filterNot(_ == EmptyScriptPubKey) .map(_.asmBytes) val allOutputs = (prevOutputScriptBytes ++ newScriptPubKeys).distinct GCS.buildBasicBlockFilter(allOutputs, key) } def fromBytes( bytes: ByteVector, blockHash: DoubleSha256Digest): GolombFilter = { val n = CompactSizeUInt.fromBytes(bytes) val filterBytes = bytes.drop(n.bytes.length) val keyBytes: ByteVector = blockHash.bytes.take(16) val key: SipHashKey = SipHashKey(keyBytes) GolombFilter(key, FilterType.Basic.M, FilterType.Basic.P, n, filterBytes.toBitVector) } def fromHex(hex: String, blockHash: DoubleSha256Digest): GolombFilter = { fromBytes(BitcoinSUtil.decodeHex(hex), blockHash) } }
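// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): building a BIP 158 basic
// filter for a block and testing whether a watched output script matches it.
// `block`, `prevOutputScripts` and `watchedScript` are assumed to be supplied
// by the caller; only the APIs defined above are used.
// ---------------------------------------------------------------------------
object BlockFilterSketch {

  def matchesWatchedScript(
      block: Block,
      prevOutputScripts: Vector[ScriptPubKey],
      watchedScript: ScriptPubKey): Boolean = {
    // Construct the filter from the block and the scripts spent by its inputs.
    val filter: GolombFilter = BlockFilter(block, prevOutputScripts)
    // Membership is tested against the raw script bytes, as in BlockFilter above.
    filter.matches(watchedScript.asmBytes)
  }
}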
bitcoin-s/bitcoin-s-core
core/src/main/scala/org/bitcoins/core/gcs/GolombFilter.scala
Scala
mit
5,868
package blended.streams.worklist import akka.NotUsed import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Keep, Sink, Source} import akka.stream.{ActorMaterializer, KillSwitches, Materializer, OverflowStrategy} import akka.testkit.TestKit import blended.testsupport.scalatest.LoggingFreeSpecLike import blended.util.logging.Logger import org.scalatest.Matchers import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext, Future} case class DummyItem( id : String ) extends WorklistItem class WorklistSpec extends TestKit(ActorSystem("Worklist")) with LoggingFreeSpecLike with Matchers { private val log = Logger[WorklistSpec] private implicit val materialzer : Materializer = ActorMaterializer() private implicit val eCtxt : ExecutionContext = system.dispatcher private val defaultCooldown = 500.millis def worklist(items : WorklistItem*) = Worklist("test", items) private def withWorklistManager(events : WorklistEvent*)(f : Seq[WorklistEvent] => Unit) : Unit = withWorklistManager(defaultCooldown, events:_*)(f) private def withWorklistManager(cooldown: FiniteDuration, events : WorklistEvent*)(f : Seq[WorklistEvent] => Unit) : Unit = { val source = Source.actorRef(100, OverflowStrategy.dropBuffer) val sink = Sink.seq[WorklistEvent] val mgr : Flow[WorklistEvent, WorklistEvent, NotUsed] = WorklistManager.flow("worklist", log) val ((actor, killswitch), result) = source .viaMat(KillSwitches.single)(Keep.both) .viaMat(Flow.fromGraph(mgr))(Keep.left) .toMat(sink)(Keep.both) .run() akka.pattern.after(cooldown, system.scheduler) { Future { killswitch.shutdown() } } events.foreach(e => actor ! e) f(Await.result(result, cooldown * 2)) } "The worklist manager should" - { "Generate a Started worklist event once the worklist is kicked off" in { val wl = Worklist("test", Seq(DummyItem("item"))) withWorklistManager(1.second, WorklistStarted(wl)) { r => r should have size(2) assert(r.head.isInstanceOf[WorklistStarted]) assert(r.last.isInstanceOf[WorklistTerminated]) } } "Ignore a start event for a worklist that is already started" in { val wl = Worklist("test", Seq(DummyItem("item"))) withWorklistManager(WorklistStarted(wl), WorklistStarted(wl)) { r => r should have size(2) assert(r.head.isInstanceOf[WorklistStarted]) assert(r.last.isInstanceOf[WorklistTerminated]) } } "Create a Worklist Terminated event with state [completed] once all items are completed" in { val item1 = DummyItem("item1") val item2 = DummyItem("item2") withWorklistManager( WorklistStarted(worklist(item1)), WorklistStepCompleted(worklist(item2), WorklistState.Completed), WorklistStepCompleted(worklist(item1), WorklistState.Completed), ) { r => r should have size(2) assert(r.head.isInstanceOf[WorklistStarted]) assert(r.last.isInstanceOf[WorklistTerminated]) } withWorklistManager( WorklistStarted(worklist(item1, item2)), WorklistStepCompleted(worklist(item2), WorklistState.Completed), WorklistStepCompleted(worklist(item1), WorklistState.Completed), ) { r => r should have size(2) assert(r.head.isInstanceOf[WorklistStarted]) r.last match { case t : WorklistTerminated => t.state should be (WorklistState.Completed) case _ => fail() } } } "Send a Terminated event with state [timeOut] once a worklist item has timed out" in { val item1 = DummyItem("item1") val item2 = DummyItem("item2") withWorklistManager( WorklistStarted(worklist(item1, item2)), ) { r => r should have size(2) assert(r.head.isInstanceOf[WorklistStarted]) r.last match { case t : WorklistTerminated => t.state should be (WorklistState.TimeOut) case _ => 
fail() } } } } }
lefou/blended
blended.streams/src/test/scala/blended/streams/worklist/WorklistSpec.scala
Scala
apache-2.0
4,112
package org.vitrivr.adampro.data.entity import org.apache.spark.sql.functions.{col, udf} import org.apache.spark.sql.types.{LongType, StructField, StructType} import org.apache.spark.sql.{DataFrame, Row, SaveMode} import org.apache.spark.storage.StorageLevel import org.vitrivr.adampro.config.AttributeNames import org.vitrivr.adampro.data.datatypes.AttributeTypes._ import org.vitrivr.adampro.data.datatypes.{AttributeTypes, TupleID} import org.vitrivr.adampro.data.entity.Entity.{AttributeName, EntityName} import org.vitrivr.adampro.data.index.Index import org.vitrivr.adampro.process.SharedComponentContext import org.vitrivr.adampro.query.ast.internal.HintBasedScanExpression.{QUERY_MARKER, log} import org.vitrivr.adampro.query.query.Predicate import org.vitrivr.adampro.storage.StorageHandler import org.vitrivr.adampro.storage.engine.ParquetEngine import org.vitrivr.adampro.utils.Logging import org.vitrivr.adampro.utils.exception.{EntityExistingException, EntityNotExistingException, EntityNotProperlyDefinedException, GeneralAdamException} import scala.collection.mutable.ListBuffer import scala.util.{Failure, Success, Try} /** * adamtwo * * Ivan Giangreco * October 2015 */ case class Entity(entityname: EntityName)(@transient implicit val ac: SharedComponentContext) extends Serializable with Logging { private val mostRecentVersion = this.synchronized { if (!ac.versionManager.containsEntity(entityname)) { ac.versionManager.put(entityname) } ac.versionManager.getEntity(entityname) } private var currentVersion = mostRecentVersion.value /** * Gets the primary key. * * @return */ lazy val pk: AttributeDefinition = ac.catalogManager.getPrimaryKey(entityname).get private var _schema: Option[Seq[AttributeDefinition]] = None /** * Schema of the entity. * * @param nameFilter filter for name * @param typeFilter filter for type * @param fullSchema add internal fields as well (e.g. TID) * @return */ def schema(nameFilter: Option[Seq[AttributeName]] = None, typeFilter: Option[Seq[AttributeType]] = None, fullSchema: Boolean = true): Seq[AttributeDefinition] = { checkVersions() if (_schema.isEmpty) { _schema = Some(ac.catalogManager.getAttributes(entityname).get.filterNot(_.pk)) } var tmpSchema = _schema.get if (nameFilter.isDefined) { tmpSchema = tmpSchema.filter(attribute => nameFilter.get.contains(attribute.name)) } if (typeFilter.isDefined) { tmpSchema = tmpSchema.filter(attribute => typeFilter.get.map(_.name).contains(attribute.attributeType.name)) } if (fullSchema) { tmpSchema = tmpSchema.+:(pk) } tmpSchema } /** * */ private[entity] lazy val handlers = schema(fullSchema = false).map(_.storagehandler()).distinct private var _data: Option[DataFrame] = None /** * Reads entity data and caches. */ private def readData(): Unit = { val handlerData = schema(fullSchema = false).groupBy(_.storagehandler).map { case (handler, attributes) => val status = handler.read(entityname, attributes.+:(pk)) if (status.isFailure) { log.error("failure when reading data", status.failed.get) } status }.filter(_.isSuccess).map(_.get) if (handlerData.nonEmpty) { if (handlerData.size == 1) { _data = Some(handlerData.head) } else { _data = Some(handlerData.reduce(_.join(_, pk.name)).coalesce(ac.config.defaultNumberOfPartitions)) } } else { _data = None } } /** * Gets the full entity data. 
* * @param nameFilter filters for names * @param typeFilter filters for field types * @param handlerFilter filters for storage handler * @param predicates attributename -> predicate (will only be applied if supported by handler) * @return */ def getData(nameFilter: Option[Seq[AttributeName]] = None, typeFilter: Option[Seq[AttributeType]] = None, handlerFilter: Option[StorageHandler] = None, predicates: Seq[Predicate] = Seq()): Option[DataFrame] = { checkVersions() if (_data.isEmpty) { readData() } var data = _data //possibly filter for current call var filteredData = data //predicates if (predicates.nonEmpty) { val handlerData = schema(fullSchema = false).groupBy(_.storagehandler).map { case (handler, attributes) => val predicate = predicates.filter(p => (p.attribute == pk.name.toString || attributes.map(_.name).contains(p.attribute))) val status = handler.read(entityname, attributes, predicate.toList) if (status.isFailure) { log.error("failure when reading data", status.failed.get) } status }.filter(_.isSuccess).map(_.get) if (handlerData.nonEmpty) { if (handlerData.size == 1) { filteredData = Some(handlerData.head) } else { filteredData = Some(handlerData.reduce(_.join(_, pk.name)).coalesce(ac.config.defaultNumberOfPartitions)) } } } /*else if(predicates.nonEmpty) { predicates.foreach { predicate => filteredData = filteredData.map(data => data.filter(col(predicate.attribute).isin(predicate.values: _*))) } }*/ //handler if (handlerFilter.isDefined) { //filter by handler, name and type filteredData = filteredData.map(_.select(schema(nameFilter, typeFilter).filter(a => a.storagehandler.equals(handlerFilter.get)).map(attribute => col(attribute.name)): _*)) } //name and type filter if (nameFilter.isDefined || typeFilter.isDefined) { //filter by name and type filteredData = filteredData.map(_.select(schema(nameFilter, typeFilter).map(attribute => col(attribute.name)): _*)) } filteredData } /** * Gets feature data. * * @return */ def getFeatureData: Option[DataFrame] = getData(typeFilter = Some(Seq(VECTORTYPE, SPARSEVECTORTYPE))) /** * Returns feature data quickly. Should only be used for internal purposes. * * @return */ private def getFeatureDataFast: Option[DataFrame] = { val handlerData = schema(fullSchema = false).filter(_.storagehandler.engine.isInstanceOf[ParquetEngine]).groupBy(_.storagehandler).map { case (handler, attributes) => val status = handler.read(entityname, attributes.+:(pk)) if (status.isFailure) { log.error("failure when reading data", status.failed.get) } status }.filter(_.isSuccess).map(_.get) if (handlerData.nonEmpty) { if (handlerData.size == 1) { Some(handlerData.head) } else { Some(handlerData.reduce(_.join(_, pk.name)).coalesce(ac.config.defaultNumberOfPartitions)) } } else { None } } /** * Gets feature attribute and pk attribute; use this for indexing purposes. * * @param attribute attribute that is indexed * @return */ def getAttributeData(attribute: String)(implicit ac: SharedComponentContext): Option[DataFrame] = getData(Some(Seq(pk.name, attribute))) /** * Caches the data. */ def cache(): Unit = { if (_data.isEmpty) { readData() } if (_data.isDefined) { _data = Some(_data.get.persist(StorageLevel.MEMORY_ONLY)) _data.get.count() //counting for caching } } /** * Returns number of elements in the entity. 
* * @return */ def count: Long = { checkVersions() var count = ac.catalogManager.getEntityOption(entityname, Some(Entity.COUNT_KEY)).get.get(Entity.COUNT_KEY).map(_.toLong) if (count.isEmpty) { if (getData().isDefined) { count = Some(getData().get.count()) ac.catalogManager.updateEntityOption(entityname, Entity.COUNT_KEY, count.get.toString) ac.catalogManager.updateEntityOption(entityname, Entity.APPROX_COUNT_KEY, count.get.toString) } } count.getOrElse(0) } /** * Gives preview of entity. * * @param k number of elements to show in preview * @return */ def show(k: Int): Option[DataFrame] = getData().map(_.select(schema(fullSchema = false).map(_.name).map(x => col(x.toString)): _*).limit(k)) private val MAX_INSERTS_BEFORE_VACUUM = ac.catalogManager.getEntityOption(entityname, Some(Entity.MAX_INSERTS_VACUUMING)).get.get(Entity.MAX_INSERTS_VACUUMING).map(_.toInt).getOrElse(Entity.DEFAULT_MAX_INSERTS_BEFORE_VACUUM) /** * Returns the total number of inserts in entity */ private def totalNumberOfInserts() = { ac.catalogManager.getEntityOption(entityname, Some(Entity.N_INSERTS)).get.get(Entity.N_INSERTS).map(_.toInt).getOrElse(0) } /** * Returns the total number of inserts in entity since last vacuuming */ private def totalNumberOfInsertsSinceVacuuming() = { ac.catalogManager.getEntityOption(entityname, Some(Entity.N_INSERTS_VACUUMING)).get.get(Entity.N_INSERTS_VACUUMING).map(_.toInt).getOrElse(0) } /** * * @param i increment by */ private def incrementNumberOfInserts(i: Int = 1) = { ac.catalogManager.updateEntityOption(entityname, Entity.N_INSERTS, (totalNumberOfInserts + i).toString) ac.catalogManager.updateEntityOption(entityname, Entity.N_INSERTS_VACUUMING, (totalNumberOfInsertsSinceVacuuming + i).toString) } /** * Inserts data into the entity. * * @param data data to insert * @param ignoreChecks whether to ignore checks * @return */ def insert(data: DataFrame, ignoreChecks: Boolean = false): Try[Void] = { log.trace("inserting data into entity") try { // 12 bits for the last 12 bits of the current nano time // 28 bits for the current insertion id, i.e., the number of inserts so far into entity // 24 bits for the tuple id within the insertion, i.e., the index within the insertion val ninserts = totalNumberOfInserts() val ninsertsvacuum = totalNumberOfInsertsSinceVacuuming() val currentTimeBits = (System.nanoTime() & 4095) << 52 val tupleidUDF = udf((count: Long) => { val ninsertsBits = ((ninserts.toLong + 1) & 268435455) << 24 val countBits = (count & 16777215) currentTimeBits + ninsertsBits + countBits }) //attach TID to rows val rdd = data.rdd.zipWithIndex.map { case (r: Row, id: Long) => Row.fromSeq(id +: r.toSeq) } var insertion = ac.sqlContext.createDataFrame( rdd, StructType(StructField(AttributeNames.internalIdColumnName + "-tmp", LongType) +: data.schema.fields)) val insertionSize = insertion.count() insertion = insertion.withColumn(AttributeNames.internalIdColumnName, tupleidUDF(col(AttributeNames.internalIdColumnName + "-tmp"))).drop(AttributeNames.internalIdColumnName + "-tmp") //AUTOTYPE attributes val autoAttributes = schema(typeFilter = Some(Seq(AttributeTypes.AUTOTYPE)), fullSchema = false) if (autoAttributes.nonEmpty) { if (autoAttributes.map(_.name).exists(x => data.schema.fieldNames.map(AttributeNameHolder(_)).contains(x))) { return Failure(new GeneralAdamException("the attributes " + autoAttributes.map(_.name).mkString(", ") + " have been specified as auto and should therefore not be provided")) } autoAttributes.foreach { attribute => insertion = 
insertion.withColumn(attribute.name, col(AttributeNames.internalIdColumnName)) } } insertion = insertion.repartition(ac.config.defaultNumberOfPartitions) //TODO: check insertion schema and entity schema before trying to insert val lock = ac.lockManager.getLockEntity(entityname) val stamp = lock.writeLock() try { //insertion per handler val handlers = insertion.schema.fields .map(field => schema(Some(Seq(field.name)), fullSchema = false)).filterNot(_.isEmpty).map(_.head) .groupBy(_.storagehandler) handlers.foreach { case (handler, attributes) => val fields = if (!attributes.exists(_.name == pk.name)) { attributes.+:(pk) } else { attributes } val df = insertion.select(fields.map(attribute => col(attribute.name)): _*) val status = handler.write(entityname, df, fields, SaveMode.Append, Map("allowRepartitioning" -> "true", "partitioningKey" -> pk.name)) if (status.isFailure) { throw status.failed.get } } incrementNumberOfInserts() markStale() //TODO: insert also into index val apxCountOld = ac.catalogManager.getEntityOption(entityname, Some(Entity.APPROX_COUNT_KEY)).get.getOrElse(Entity.APPROX_COUNT_KEY, "0").toInt ac.catalogManager.updateEntityOption(entityname, Entity.APPROX_COUNT_KEY, (apxCountOld + insertionSize).toString) } finally { lock.unlockWrite(stamp) } if (ninsertsvacuum > MAX_INSERTS_BEFORE_VACUUM || ninsertsvacuum % (MAX_INSERTS_BEFORE_VACUUM / 5) == 0 && getFeatureDataFast.isDefined && getFeatureDataFast.get.rdd.getNumPartitions > Entity.DEFAULT_MAX_PARTITIONS) { log.info("number of inserts necessitates now re-partitioning") if (schema(fullSchema = false).filter(_.storagehandler.engine.isInstanceOf[ParquetEngine]).nonEmpty) { //entity is partitionable vacuum() } else { //entity is not partitionable, increase number of max inserts to max value ac.catalogManager.updateEntityOption(entityname, Entity.MAX_INSERTS_VACUUMING, Int.MaxValue.toString) } } Success(null) } catch { case e: Exception => Failure(e) } } /** * Vacuums the entity, i.e., clean up operations for entity */ def vacuum(): Unit = { val lock = ac.lockManager.getLockEntity(entityname) val stamp = lock.writeLock() try { EntityPartitioner(this, ac.config.defaultNumberOfPartitions, attribute = Some(pk.name)) ac.catalogManager.updateEntityOption(entityname, Entity.N_INSERTS_VACUUMING, 0.toString) } finally { lock.unlock(stamp) } } /** * Deletes tuples that fit the predicates. * * @param predicates */ def delete(predicates: Seq[Predicate]): Int = { var newData = getData().get predicates.foreach { predicate => newData = newData.filter("NOT " + predicate.sqlString) } //TODO: delete also from index val handlers = newData.schema.fields .map(field => schema(Some(Seq(field.name)), fullSchema = false)).filterNot(_.isEmpty).map(_.head) .groupBy(_.storagehandler) handlers.foreach { case (handler, attributes) => val fields = if (!attributes.exists(_.name == pk.name)) { attributes.+:(pk) } else { attributes } val df = newData.select(fields.map(attribute => col(attribute.name)): _*) val status = handler.write(entityname, df, fields, SaveMode.Overwrite, Map("allowRepartitioning" -> "true", "partitioningKey" -> pk.name)) if (status.isFailure) { throw status.failed.get } } val oldCount = count markStale() val newCount = count (oldCount - newCount).toInt } /** * Returns all available indexes for the entity. * * @return */ def indexes: Seq[Try[Index]] = { ac.catalogManager.listIndexes(Some(entityname)).get.map(index => Index.load(index)) } /** * Marks the entity stale, but without changes to the data. 
*/ def markSoftStale(): Unit = { mostRecentVersion.add(1) //_schema = None schema cannot be changed _data.map(_.unpersist()) _data = None ac.cacheManager.invalidateEntity(entityname) currentVersion = mostRecentVersion.value } /** * Marks the data stale (e.g., if new data has been inserted to entity). */ def markStale(): Unit = { markSoftStale() indexes.map(_.map(_.markStale())) ac.catalogManager.deleteEntityOption(entityname, Entity.COUNT_KEY) } /** * Checks if cached data is up to date, i.e. if version of local entity corresponds to global version of entity */ private def checkVersions(): Unit = { if (currentVersion < mostRecentVersion.value) { //_schema = None schema cannot be changed _data.map(_.unpersist()) _data = None ac.cacheManager.invalidateEntity(entityname) currentVersion = mostRecentVersion.value } } /** * Drops the data of the entity. */ def drop(): Unit = { Index.dropAll(entityname) try { schema(fullSchema = false).groupBy(_.storagehandler) .foreach { case (handler, attributes) => try { handler.drop(entityname) } catch { case e: Exception => log.error("exception when dropping entity " + entityname, e) } } } catch { case e: Exception => log.error("exception when dropping entity " + entityname, e) } finally { ac.catalogManager.dropEntity(entityname) } } /** * Returns stored entity options. */ private def options = ac.catalogManager.getEntityOption(entityname) /** * Returns a map of properties to the entity. Useful for printing. * * @param options */ def propertiesMap(options: Map[String, String] = Map()) = { val lb = ListBuffer[(String, String)]() lb.append("attributes" -> schema(fullSchema = false).map(field => field.name).mkString(",")) lb.append("indexes" -> ac.catalogManager.listIndexes(Some(entityname)).get.mkString(",")) val apxCount = ac.catalogManager.getEntityOption(entityname, Some(Entity.APPROX_COUNT_KEY)).get.get(Entity.APPROX_COUNT_KEY) if (apxCount.isDefined) { lb.append("apxCount" -> apxCount.get) } if (options.contains("partitions") && options("partitions") == "true") { try { lb.append("partitions" -> getFeatureDataFast.map(_.rdd.getNumPartitions.toString).getOrElse("none")) } catch { case e: Exception => log.warn("no partition information retrievable, possibly no data yet inserted") } } if (options.contains("count") && options("count") == "true") { lb.append("count" -> count.toString) } lb.toMap } /** * Returns a map of properties to a specified attribute. Useful for printing. * * @param attribute name of attribute * @param options * @return */ def attributePropertiesMap(attribute: String, options: Map[String, String] = Map()): Map[String, String] = { schema(Some(Seq(attribute)), fullSchema = false).headOption.map(_.propertiesMap).getOrElse(Map()) } override def equals(that: Any): Boolean = that match { case that: Entity => this.entityname.equals(that.entityname) case _ => false } override def hashCode: Int = entityname.hashCode } object Entity extends Logging { type EntityName = EntityNameHolder type AttributeName = AttributeNameHolder private val COUNT_KEY = "ntuples" private val APPROX_COUNT_KEY = "ntuplesapprox" private val N_INSERTS = "ninserts" private val N_INSERTS_VACUUMING = "ninsertsvac" private val MAX_INSERTS_VACUUMING = "maxinserts" private val DEFAULT_MAX_INSERTS_BEFORE_VACUUM = 100 private val DEFAULT_MAX_PARTITIONS = 100 /** * Check if entity exists. Note that this only checks the catalog; the entity may still exist in the file system. 
* * @param entityname name of entity * @return */ def exists(entityname: EntityName)(implicit ac: SharedComponentContext): Boolean = { val res = ac.catalogManager.existsEntity(entityname) if (res.isFailure) { throw res.failed.get } res.get } /** * Creates an entity. * * @param entityname name of the entity * @param creationAttributes attributes of entity * @param ifNotExists if set to true and the entity exists, the entity is just returned; otherwise an error is thrown * @return */ def create(entityname: EntityName, creationAttributes: Seq[AttributeDefinition], ifNotExists: Boolean = false)(implicit ac: SharedComponentContext): Try[Entity] = { try { //checks if (exists(entityname)) { if (!ifNotExists) { return Failure(EntityExistingException()) } else { return load(entityname) } } if (creationAttributes.isEmpty) { return Failure(EntityNotProperlyDefinedException("Entity " + entityname + " will have no attributes")) } val reservedNames = creationAttributes.map(a => a.name -> AttributeNames.isNameReserved(a.name)).filter(_._2 == true) if (reservedNames.nonEmpty) { return Failure(EntityNotProperlyDefinedException("Entity defined with field " + reservedNames.map(_._1).mkString + ", but name is reserved")) } if (creationAttributes.map(_.name).distinct.length != creationAttributes.length) { return Failure(EntityNotProperlyDefinedException("Entity defined with duplicate fields; note that all attribute names have to be lower-case.")) } val pk = new AttributeDefinition(AttributeNames.internalIdColumnName, TupleID.AdamTupleID, "") val attributes = creationAttributes.+:(pk) ac.catalogManager.createEntity(entityname, attributes) ac.catalogManager.updateEntityOption(entityname, COUNT_KEY, "0") ac.catalogManager.updateEntityOption(entityname, APPROX_COUNT_KEY, "0") creationAttributes.groupBy(_.storagehandler).foreach { case (handler, handlerAttributes) => val status = handler.create(entityname, handlerAttributes.+:(pk)) if (status.isFailure) { throw new GeneralAdamException("failing on handler " + handler.name + ":" + status.failed.get) } } Success(Entity(entityname)(ac)) } catch { case e: Exception => //drop everything created in handlers ac.storageManager.handlers.values.foreach { handler => try { handler.drop(entityname) } catch { case e: Exception => //careful: if entity has not been created yet in handler then we may get an exception } } //drop from catalog ac.catalogManager.dropEntity(entityname, true) Failure(e) } } /** * Lists names of all entities. * * @return name of entities */ def list(implicit ac: SharedComponentContext): Seq[EntityName] = ac.catalogManager.listEntities().get /** * Loads an entity. * * @param entityname name of entity * @return */ def load(entityname: EntityName, cache: Boolean = false)(implicit ac: SharedComponentContext): Try[Entity] = { log.trace(QUERY_MARKER, "load entity") val entity = if (ac.cacheManager.containsEntity(entityname) && ac.cacheManager.getEntity(entityname).isSuccess) { ac.cacheManager.getEntity(entityname) } else { val loadedEntity = Entity.loadEntityMetaData(entityname)(ac) if (loadedEntity.isSuccess) { ac.cacheManager.put(entityname, loadedEntity.get) if (cache) { loadedEntity.get.cache() } } loadedEntity } log.trace(QUERY_MARKER, "loaded entity") entity } /** * Loads the entityname metadata without loading the data itself yet. 
* * @param entityname name of entity * @return */ def loadEntityMetaData(entityname: EntityName)(implicit ac: SharedComponentContext): Try[Entity] = { if (!exists(entityname)) { return Failure(EntityNotExistingException.withEntityname(entityname)) } try { Success(Entity(entityname)(ac)) } catch { case e: Exception => Failure(e) } } /** * Drops an entity. * * @param entityname name of entity * @param ifExists if set to true, no error is raised if entity does not exist * @return */ def drop(entityname: EntityName, ifExists: Boolean = false)(implicit ac: SharedComponentContext): Try[Void] = { try { if (!exists(entityname)) { if (!ifExists) { return Failure(EntityNotExistingException.withEntityname(entityname)) } else { return Success(null) } } Entity.load(entityname).get.drop() ac.cacheManager.invalidateEntity(entityname) Success(null) } catch { case e: Exception => Failure(e) } } }
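// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the typical lifecycle of
// an entity through the companion API defined above — create it (if missing),
// insert a DataFrame and read back the tuple count. The attribute definitions,
// the DataFrame and the implicit SharedComponentContext are assumed to be
// provided by the caller; the object name is hypothetical.
// ---------------------------------------------------------------------------
object EntityUsageSketch {

  def createAndFill(
      entityname: EntityName,
      attributes: Seq[AttributeDefinition],
      data: DataFrame)(implicit ac: SharedComponentContext): Try[Long] = {
    for {
      entity <- Entity.create(entityname, attributes, ifNotExists = true)
      _ <- entity.insert(data)
    } yield entity.count
  }
}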
dbisUnibas/ADAMpro
src/main/scala/org/vitrivr/adampro/data/entity/Entity.scala
Scala
mit
24,441
package com.github.wartman4404.gldraw import java.util.concurrent.atomic.AtomicBoolean import android.graphics.SurfaceTexture import android.os.{Handler, Looper, Message, SystemClock} import android.util.Log import android.graphics.Bitmap import unibrush.Layer import MainActivity.Rotation class TextureSurfaceThread(surface: SurfaceTexture, private var motionHandler: MotionEventHandler, handlerCallback: (TextureSurfaceThread)=>Unit, errorCallback: (Exception)=>Unit) extends Thread with Handler.Callback with AndroidImplicits { import TextureSurfaceThread.Constants._ import TextureSurfaceThread._ private var handler: Handler = null private val running = new AtomicBoolean(true) var targetFramerate = 15 private val matrix = new Array[Float](16) private var eglHelper: EGLHelper = null private var pOutputShader: Option[CopyShader] = None def outputShader = pOutputShader private var glinit: Option[GLInit] = None private var replay = Replay.nullReplay @native protected def nativeUpdateGL(data: GLInit): Unit @native protected def nativeDrawQueuedPoints(data: GLInit, handler: MotionEventHandler, transformMatrix: Array[Float]): Unit @native protected def nativeFinishLuaScript(data: GLInit, handler: MotionEventHandler): Unit @native protected def nativeClearFramebuffer(data: GLInit): Unit @native protected def nativeDrawImage(data: GLInit, bitmap: Bitmap, rotation: Rotation): Unit @native protected def nativeSetAnimShader(data: GLInit, shader: CopyShader): Boolean @native protected def nativeSetCopyShader(data: GLInit, shader: CopyShader): Boolean @native protected def nativeSetPointShader(data: GLInit, shader: PointShader): Boolean @native protected def nativeSetBrushTexture(data: GLInit, t: TexturePtr): Unit @native protected def nativeExportPixels(data: GLInit): Bitmap @native protected def nativeSetInterpolator(data: GLInit, script: LuaScript): Unit @native protected def nativeAddLayer(data: GLInit, copyshader: CopyShader, pointshader: PointShader, pointidx: Int): Unit @native protected def nativeClearLayers(data: GLInit): Unit @native protected def nativeLoadUndo(data: GLInit, pos: Int): Unit @native protected def nativePushUndoFrame(data: GLInit): Int @native protected def nativeClearUndoFrames(data: GLInit): Unit //@native protected def nativeSetBrushProperties(props: BrushProperties): Unit @native protected def nativeSetBrushColor(data: GLInit, color: Int): Unit @native protected def nativeSetBrushSize(data: GLInit, size: Float): Unit override def run() = { Looper.prepare() handler = new Handler(this) handlerCallback(this) Log.i("everybody-draws", "gl thread: entering message loop") Looper.loop() } override def handleMessage(msg: Message): Boolean = { msg.what match { case MSG_NEW_FRAME => { if (running.get()){ val next = SystemClock.uptimeMillis() + 1000 / targetFramerate val gl: GLInit = GLInit.fromMessage(msg) try { if (replay == Replay.nullReplay) { drawQueuedPoints(gl) } else { drawReplayFrame(gl, replay) } } catch { case e: LuaException => { nativeSetInterpolator(gl, LuaScript(gl, null)) errorCallback(e) } } updateGL(gl) val newmessage = gl.toMessage(handler.obtainMessage(MSG_NEW_FRAME)) handler.sendMessageAtTime(newmessage, next) } } case MSG_END_GL => { glinit.foreach(GLInit.destroy _) glinit = None eglHelper.finish() Looper.myLooper().quit() } case MSG_BEGIN_GL => { Log.i("everybody-draws", "gl thread: got begin_gl message"); eglHelper = new EGLHelper() eglHelper.init(surface) val BeginGLArgs(undoCallback, beginGLCallback) = msg.obj.asInstanceOf[BeginGLArgs] val gl = GLInit(msg.arg1, 
msg.arg2, undoCallback) glinit = Some(gl) pOutputShader = Some(CopyShader(gl, null, null)) android.opengl.Matrix.orthoM(matrix, 0, 0, msg.arg1, msg.arg2, 0, -1, 1) updateGL(gl) beginGLCallback(gl) } } true } def beginGL(x: Int, y: Int, initCallback: (GLInit)=>Unit, undoCallback: UndoCallback): Unit = { handler.obtainMessage(MSG_BEGIN_GL, x, y, BeginGLArgs(undoCallback, initCallback)).sendToTarget() } def startFrames(): Unit = { glinit match { case Some(gl) => startFrames(gl) case None => Log.e("everybody-draws", "gl thread: unable to start frames, no gl inited!") } } def startFrames(gl: GLInit): Unit = { this.running.set(true) gl.toMessage(handler.obtainMessage(MSG_NEW_FRAME)).sendToTarget() } def stopFrames() = { this.running.set(false) } // TODO: check if we're already on the gl thread private def runHere(fn: => Unit) = { handler.post(() => { fn; () }) } def initScreen(gl: GLInit, bitmap: Option[Bitmap], rotation: Rotation) = { for (b <- bitmap) { nativeDrawImage(gl, b, rotation) b.recycle() } } def clearScreen(gl: GLInit) = { nativeClearFramebuffer(gl) } // callback runs on gl thread def getBitmap(cb: (GLInit, Bitmap)=>Any) = withGL(gl => cb(gl, nativeExportPixels(gl))) def getBitmapSynchronized() = { var bitmap: Bitmap = null val notify = new Object() notify.synchronized { getBitmap((gl, x) => { bitmap = x notify.synchronized { notify.notify() } }) notify.wait() } bitmap } def cleanupGL() = { handler.obtainMessage(MSG_END_GL).sendToTarget() } def drawBitmap(gl: GLInit, bitmap: Bitmap) = { nativeDrawImage(gl, bitmap, MainActivity.NoRotation) } // private private def drawQueuedPoints(g: GLInit) = { nativeDrawQueuedPoints(g, motionHandler, matrix) } def finishLuaScript(gl: GLInit) = { //Log.i("everybody-draws", "gl thread: finishing lua script - final draw") nativeDrawQueuedPoints(gl, motionHandler, matrix) //Log.i("everybody-draws", "gl thread: finishing lua script - unloading") nativeFinishLuaScript(gl, motionHandler) } private def drawReplayFrame(gl: GLInit, r: Replay) = { val finished = Replay.advanceFrame(gl, r, matrix) if (finished) { Replay.destroy(r) this.replay = Replay.nullReplay } } private def updateGL(g: GLInit) { nativeUpdateGL(g) } def setBrushTexture(gl: GLInit, texture: Texture) { nativeSetBrushTexture(gl, texture.ptr) } def beginReplay() { withGL(gl => { replay = Replay.init(gl) }) } def clearLayers(gl: GLInit) = nativeClearLayers(gl) def addLayer(gl: GLInit, copyshader: CopyShader, pointshader: PointShader, pointidx: Int) = { nativeAddLayer(gl, copyshader, pointshader, pointidx) } def loadUndo(gl: GLInit, pos: Int) = nativeLoadUndo(gl, pos) def pushUndoFrame(gl: GLInit): Int = nativePushUndoFrame(gl) def clearUndoFrames(gl: GLInit) = nativeClearUndoFrames(gl) // only set values, could maybe run on main thread def setAnimShader(gl: GLInit, shader: CopyShader) = nativeSetAnimShader(gl, shader) def setPointShader(gl: GLInit, shader: PointShader) = nativeSetPointShader(gl, shader) def setInterpScript(gl: GLInit, script: LuaScript) = nativeSetInterpolator(gl, script) def setCopyShader(gl: GLInit, shader: CopyShader) = nativeSetCopyShader(gl, shader) def setBrushColor(gl: GLInit, color: Int) = nativeSetBrushColor(gl, color) def setBrushSize(gl: GLInit, size: Float) = nativeSetBrushSize(gl, size) def withGL(cb: (GLInit) => Unit) = { val stack = new RuntimeException(); for (gl <- glinit) { runHere { try { cb(gl) } catch { case e: Exception => { stack.initCause(e) throw stack } } }} } } object TextureSurfaceThread { object Constants { val MSG_NEW_FRAME = 1 val MSG_END_GL = 2 val 
MSG_BEGIN_GL = 3 val MSG_BEGIN_FRAMES = 4 } case class BeginGLArgs(undoCallback: UndoCallback, initCallback: (GLInit) => Unit) }
wartman4404/everybody-draw
src/main/scala/TextureSurfaceRenderer.scala
Scala
apache-2.0
8,159
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.history

import scala.collection.mutable

import org.apache.spark.deploy.history.{EventFilter, EventFilterBuilder, JobEventFilter}
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler._
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.ui._
import org.apache.spark.sql.streaming.StreamingQueryListener

/**
 * This class tracks live SQL executions, and pass the list to the [[SQLLiveEntitiesEventFilter]]
 * to help SQLLiveEntitiesEventFilter to accept live SQL executions as well as relevant
 * jobs (+ stages/tasks/RDDs).
 *
 * Note that this class only tracks the jobs which are relevant to SQL executions - cannot classify
 * between finished job and live job without relation of SQL execution.
 */
private[spark] class SQLEventFilterBuilder extends SparkListener with EventFilterBuilder {
  private val liveExecutionToJobs = new mutable.HashMap[Long, mutable.Set[Int]]
  private val jobToStages = new mutable.HashMap[Int, Set[Int]]
  private val stageToTasks = new mutable.HashMap[Int, mutable.Set[Long]]
  private val stageToRDDs = new mutable.HashMap[Int, Set[Int]]
  private val stages = new mutable.HashSet[Int]

  private[history] def liveSQLExecutions: Set[Long] = liveExecutionToJobs.keySet.toSet
  private[history] def liveJobs: Set[Int] = liveExecutionToJobs.values.flatten.toSet
  private[history] def liveStages: Set[Int] = stageToRDDs.keySet.toSet
  private[history] def liveTasks: Set[Long] = stageToTasks.values.flatten.toSet
  private[history] def liveRDDs: Set[Int] = stageToRDDs.values.flatten.toSet

  override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
    val executionIdString = jobStart.properties.getProperty(SQLExecution.EXECUTION_ID_KEY)
    if (executionIdString == null) {
      // This is not a job created by SQL
      return
    }

    val executionId = executionIdString.toLong
    val jobId = jobStart.jobId

    val jobsForExecution = liveExecutionToJobs.getOrElseUpdate(executionId,
      mutable.HashSet[Int]())
    jobsForExecution += jobId

    jobToStages += jobStart.jobId -> jobStart.stageIds.toSet
    stages ++= jobStart.stageIds
  }

  override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = {
    val stageId = stageSubmitted.stageInfo.stageId
    if (stages.contains(stageId)) {
      stageToRDDs.put(stageId, stageSubmitted.stageInfo.rddInfos.map(_.id).toSet)
      stageToTasks.getOrElseUpdate(stageId, new mutable.HashSet[Long]())
    }
  }

  override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = {
    stageToTasks.get(taskStart.stageId).foreach { tasks =>
      tasks += taskStart.taskInfo.taskId
    }
  }

  override def onOtherEvent(event: SparkListenerEvent): Unit = event match {
    case e: SparkListenerSQLExecutionStart => onExecutionStart(e)
    case e: SparkListenerSQLExecutionEnd => onExecutionEnd(e)
    case _ => // Ignore
  }

  private def onExecutionStart(event: SparkListenerSQLExecutionStart): Unit = {
    liveExecutionToJobs += event.executionId -> mutable.HashSet[Int]()
  }

  private def onExecutionEnd(event: SparkListenerSQLExecutionEnd): Unit = {
    liveExecutionToJobs.remove(event.executionId).foreach { jobs =>
      val stagesToDrop = jobToStages.filter(kv => jobs.contains(kv._1)).values.flatten
      jobToStages --= jobs
      stages --= stagesToDrop
      stageToTasks --= stagesToDrop
      stageToRDDs --= stagesToDrop
    }
  }

  override def createFilter(): EventFilter = {
    new SQLLiveEntitiesEventFilter(liveSQLExecutions, liveJobs, liveStages, liveTasks, liveRDDs)
  }
}

/**
 * This class accepts events which are related to the live SQL executions based on the given
 * information.
 *
 * Note that acceptFn will not match the event ("Don't mind") instead of returning false on
 * job related events, because it cannot determine whether the job is related to the finished
 * SQL executions, or job is NOT related to the SQL executions. For this case, it just gives up
 * the decision and let other filters decide it.
 */
private[spark] class SQLLiveEntitiesEventFilter(
    liveSQLExecutions: Set[Long],
    liveJobs: Set[Int],
    liveStages: Set[Int],
    liveTasks: Set[Long],
    liveRDDs: Set[Int])
  extends JobEventFilter(None, liveJobs, liveStages, liveTasks, liveRDDs) with Logging {

  logDebug(s"live SQL executions : $liveSQLExecutions")

  private val _acceptFn: PartialFunction[SparkListenerEvent, Boolean] = {
    case e: SparkListenerSQLExecutionStart => liveSQLExecutions.contains(e.executionId)
    case e: SparkListenerSQLAdaptiveExecutionUpdate => liveSQLExecutions.contains(e.executionId)
    case e: SparkListenerSQLExecutionEnd => liveSQLExecutions.contains(e.executionId)
    case e: SparkListenerDriverAccumUpdates => liveSQLExecutions.contains(e.executionId)

    case e if acceptFnForJobEvents.lift(e).contains(true) =>
      // NOTE: if acceptFnForJobEvents(e) returns false, we should leave it to "unmatched"
      // because we don't know whether the job has relevant SQL execution which is finished,
      // or the job is not related to the SQL execution.
      true

    // these events are for finished batches so safer to ignore
    case _: StreamingQueryListener.QueryProgressEvent => false
  }

  override def acceptFn(): PartialFunction[SparkListenerEvent, Boolean] = _acceptFn
}
mahak/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/history/SQLEventFilterBuilder.scala
Scala
apache-2.0
6,205
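A minimal, self-contained sketch of the partial-function lifting idiom used by SQLLiveEntitiesEventFilter above (acceptFnForJobEvents.lift(e).contains(true)). The Event type and filter here are hypothetical stand-ins rather than Spark classes; the point is only how lift turns "no case matched" into None, so a filter can abstain instead of rejecting.

object LiftedFilterSketch {
  sealed trait Event
  case class JobEvent(jobId: Int) extends Event
  case class OtherEvent(name: String) extends Event

  val liveJobs = Set(1, 2)

  // Partial function: defined only for job events; accept iff the job is live.
  val acceptFnForJobEvents: PartialFunction[Event, Boolean] = {
    case JobEvent(id) => liveJobs.contains(id)
  }

  // Tri-state decision: Some(true) = accept, Some(false) = reject, None = abstain.
  def decide(e: Event): Option[Boolean] = acceptFnForJobEvents.lift(e)

  def main(args: Array[String]): Unit = {
    println(decide(JobEvent(1)))     // Some(true)  -> accept
    println(decide(JobEvent(42)))    // Some(false) -> reject
    println(decide(OtherEvent("x"))) // None        -> leave the decision to other filters
  }
}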
/*******************************************************************************
 * Copyright 2013 Simon Todd <simon@sltodd.co.uk>.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package uk.co.sltodd.geneticakka

import akka.testkit.TestActorRef
import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern.ask
import org.junit.Test
import akka.actor.ActorSystem
import akka.util.Timeout
import org.junit.Assert._

class HostTest {

  @Test
  def basicCandidate() = {
    class Cnd extends Host {
      def fitness(c : Chromosome) = {
        c.genes(0) - 10
      }
      val chromosomeSize = 1
    }
    implicit val system = ActorSystem()
    val worker = TestActorRef(new Cnd())
    val c = Chromosome(List(5))
    implicit val timeout = Timeout(1 second)
    val result = Await.result(worker ? c, timeout.duration).asInstanceOf[Result]
    val result2 = Await.result(worker ? (), timeout.duration)
    assertEquals("Incorrect fitness", -5.0, result.fitness, 1e-15)
    result2 match {
      case Failure => { }
      case _ => fail("Not receiving Failure class.")
    }
  }
}
SLTodd/genetic-akka
src/test/scala/uk/co/sltodd/geneticakka/HostTest.scala
Scala
apache-2.0
1,733
package collins.models.asset

import play.api.libs.json.JsObject
import play.api.libs.json.JsString
import play.api.libs.json.JsValue
import play.api.libs.json.Json

import collins.models.Asset
import collins.models.AssetMeta
import collins.models.AssetMetaValue
import collins.models.IpAddresses
import collins.models.IpmiInfo
import collins.models.LldpHelper
import collins.models.LshwHelper
import collins.models.MetaWrapper
import collins.models.PowerHelper
import collins.models.conversions.IpAddressFormat
import collins.models.conversions.IpmiFormat
import collins.util.LldpRepresentation
import collins.util.LshwRepresentation
import collins.util.config.Feature
import collins.util.power.PowerUnit.PowerUnitFormat
import collins.util.power.PowerUnits
import collins.util.power.PowerUnits

object AllAttributes {
  def get(asset: Asset): AllAttributes = {
    if (asset.isConfiguration) {
      AllAttributes(asset, LshwRepresentation.empty, LldpRepresentation.empty,
        None, IpAddresses.findAllByAsset(asset), PowerUnits(),
        AssetMetaValue.findByAsset(asset))
    } else {
      val (lshwRep, mvs) = LshwHelper.reconstruct(asset)
      val (lldpRep, mvs2) = LldpHelper.reconstruct(asset, mvs)
      val ipmi = IpmiInfo.findByAsset(asset)
      val addresses = IpAddresses.findAllByAsset(asset)
      val (powerRep, mvs3) = PowerHelper.reconstruct(asset, mvs2)
      val filtered: Seq[MetaWrapper] = mvs3.filter(f => !Feature.hideMeta.contains(f.getName))
      AllAttributes(asset, lshwRep, lldpRep, ipmi, addresses, powerRep, filtered)
    }
  }
}

case class AllAttributes(
  asset: Asset,
  lshw: LshwRepresentation,
  lldp: LldpRepresentation,
  ipmi: Option[IpmiInfo],
  addresses: Seq[IpAddresses],
  power: PowerUnits,
  mvs: Seq[MetaWrapper]) {

  import collins.models.conversions._
  import collins.util.power.PowerUnit.PowerUnitFormat

  def exposeCredentials(showCreds: Boolean = false) = {
    this.copy(ipmi = this.ipmi.map { _.withExposedCredentials(showCreds) })
      .copy(mvs = this.metaValuesWithExposedCredentials(showCreds))
  }

  protected def metaValuesWithExposedCredentials(showCreds: Boolean): Seq[MetaWrapper] = {
    if (showCreds) {
      mvs
    } else {
      mvs.filter(mv => !Feature.encryptedTags.contains(mv.getName))
    }
  }

  def toJsValue(): JsValue = {
    val outSeq = Seq(
      "ASSET" -> asset.toJsValue,
      "HARDWARE" -> lshw.toJsValue,
      "CLASSIFICATION" -> Json.toJson(asset.nodeClass),
      "LLDP" -> lldp.toJsValue,
      "IPMI" -> Json.toJson(ipmi),
      "ADDRESSES" -> Json.toJson(addresses),
      "POWER" -> Json.toJson(power),
      "ATTRIBS" -> JsObject(mvs.groupBy { _.getGroupId }.map {
        case (groupId, mv) =>
          groupId.toString -> JsObject(mv.map { mvw => mvw.getName -> JsString(mvw.getValue) })
      }.toSeq))
    JsObject(outSeq)
  }
}
byxorna/collins
app/collins/models/asset/AllAttributes.scala
Scala
apache-2.0
2,894
package opennlp.scalabha.tag

import scala.collection.SeqLike
import scala.collection.generic.CanBuildFrom

object TagUtils {

  class Enriched_ended_Seq[A, Repr](self: SeqLike[A, Repr]) {
    def ended[That](implicit bf: CanBuildFrom[Repr, Option[A], That]): That = {
      val b = bf(self.asInstanceOf[Repr])
      b.sizeHint(self.size)
      b += None
      for (x <- self) b += Some(x)
      b += None
      b.result
    }
  }
  implicit def enriched_ended_Seq[A, Repr](self: SeqLike[A, Repr]) = new Enriched_ended_Seq(self)

}
utcompling/Scalabha
src/main/scala/opennlp/scalabha/tag/TagUtils.scala
Scala
apache-2.0
532
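A small usage sketch for the ended enrichment above, assuming TagUtils is on the classpath, its implicit conversion is imported, and a pre-2.13 Scala version is used (the CanBuildFrom signature above implies one). It wraps a sequence in None boundary markers, the usual trick for marking sequence starts and ends in tagging.

import opennlp.scalabha.tag.TagUtils._

object EndedExample {
  def main(args: Array[String]): Unit = {
    val tokens = Vector("the", "cat", "sat")
    // Prepends and appends None, wrapping each original element in Some.
    val bounded = tokens.ended
    println(bounded) // Vector(None, Some(the), Some(cat), Some(sat), None)
  }
}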
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.bagel

import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.serializer.KryoRegistrator

import org.apache.spark.bagel._
import org.apache.spark.bagel.Bagel._

import scala.collection.mutable.ArrayBuffer

import java.io.{InputStream, OutputStream, DataInputStream, DataOutputStream}

import com.esotericsoftware.kryo._

class PageRankUtils extends Serializable {
  def computeWithCombiner(numVertices: Long, epsilon: Double)(
    self: PRVertex, messageSum: Option[Double], superstep: Int
  ): (PRVertex, Array[PRMessage]) = {
    val newValue = messageSum match {
      case Some(msgSum) if msgSum != 0 =>
        0.15 / numVertices + 0.85 * msgSum
      case _ => self.value
    }

    val terminate = superstep >= 10

    val outbox: Array[PRMessage] =
      if (!terminate)
        self.outEdges.map(targetId =>
          new PRMessage(targetId, newValue / self.outEdges.size))
      else
        Array[PRMessage]()

    (new PRVertex(newValue, self.outEdges, !terminate), outbox)
  }

  def computeNoCombiner(numVertices: Long, epsilon: Double)
    (self: PRVertex, messages: Option[Array[PRMessage]], superstep: Int): (PRVertex, Array[PRMessage]) =
    computeWithCombiner(numVertices, epsilon)(self, messages match {
      case Some(msgs) => Some(msgs.map(_.value).sum)
      case None => None
    }, superstep)
}

class PRCombiner extends Combiner[PRMessage, Double] with Serializable {
  def createCombiner(msg: PRMessage): Double =
    msg.value
  def mergeMsg(combiner: Double, msg: PRMessage): Double =
    combiner + msg.value
  def mergeCombiners(a: Double, b: Double): Double =
    a + b
}

class PRVertex() extends Vertex with Serializable {
  var value: Double = _
  var outEdges: Array[String] = _
  var active: Boolean = _

  def this(value: Double, outEdges: Array[String], active: Boolean = true) {
    this()
    this.value = value
    this.outEdges = outEdges
    this.active = active
  }

  override def toString(): String = {
    "PRVertex(value=%f, outEdges.length=%d, active=%s)".format(value, outEdges.length, active.toString)
  }
}

class PRMessage() extends Message[String] with Serializable {
  var targetId: String = _
  var value: Double = _

  def this(targetId: String, value: Double) {
    this()
    this.targetId = targetId
    this.value = value
  }
}

class PRKryoRegistrator extends KryoRegistrator {
  def registerClasses(kryo: Kryo) {
    kryo.register(classOf[PRVertex])
    kryo.register(classOf[PRMessage])
  }
}

class CustomPartitioner(partitions: Int) extends Partitioner {
  def numPartitions = partitions

  def getPartition(key: Any): Int = {
    val hash = key match {
      case k: Long => (k & 0x00000000FFFFFFFFL).toInt
      case _ => key.hashCode
    }

    // Map the computed hash into [0, partitions) so negative values still yield a valid partition.
    val mod = hash % partitions
    if (mod < 0) mod + partitions else mod
  }

  override def equals(other: Any): Boolean = other match {
    case c: CustomPartitioner =>
      c.numPartitions == numPartitions
    case _ => false
  }
}
dotunolafunmiloye/spark
examples/src/main/scala/org/apache/spark/examples/bagel/PageRankUtils.scala
Scala
apache-2.0
3,823
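The damped update in computeWithCombiner above is the standard PageRank iteration, newValue = 0.15 / numVertices + 0.85 * messageSum. The following is a Bagel-free sketch of the same arithmetic on a tiny made-up graph (no Spark dependencies), to make the message/combiner split concrete; the graph and iteration count are illustrative only.

object PageRankArithmeticSketch {
  def main(args: Array[String]): Unit = {
    // Adjacency list: vertex id -> outgoing edge targets.
    val edges = Map("a" -> Seq("b", "c"), "b" -> Seq("c"), "c" -> Seq("a"))
    val n = edges.size.toLong
    var rank = edges.keys.map(_ -> 1.0 / n).toMap

    for (_ <- 1 to 10) {
      // Each vertex sends rank / outDegree to its targets (the PRMessage step).
      val contributions = edges.toSeq.flatMap { case (src, outs) =>
        outs.map(dst => dst -> rank(src) / outs.size)
      }
      // Combiner step: sum messages per target, then apply the damped update.
      val messageSum = contributions.groupBy(_._1).map { case (v, cs) => v -> cs.map(_._2).sum }
      rank = rank.map { case (v, old) =>
        v -> messageSum.get(v).map(s => 0.15 / n + 0.85 * s).getOrElse(old)
      }
    }
    rank.foreach { case (v, r) => println(f"$v%s: $r%.4f") }
  }
}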
import sbt._
import Keys._

object CommonSettings {
  private val SparkVersion = "2.1.0"

  val sparkDependencies: Seq[ModuleID] = Seq(
    "org.apache.spark" %% "spark-core" % SparkVersion,
    "org.apache.spark" %% "spark-sql" % SparkVersion
  )

  val commonLibDependencies: Seq[ModuleID] = Seq(
    "com.github.nscala-time" %% "nscala-time" % "2.16.0"
  )

  val commonTestScalacSettings: Seq[String] = Seq("-Yrangepos")

  val commonResolvers: Seq[MavenRepository] = Seq(
    "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases"
  )

  val commonSettings: Seq[Def.Setting[_]] = Seq(
    resolvers ++= commonResolvers,
    scalacOptions in Test ++= commonTestScalacSettings,
    libraryDependencies ++= commonLibDependencies
  )
}
salceson/iosr-distributed-data-processing
project/CommonSettings.scala
Scala
mit
742
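A hypothetical snippet showing how a build definition might consume the CommonSettings object above. The project name is invented, and marking the Spark dependencies as "provided" is a project-level choice for cluster-submitted jars, not something the original file prescribes.

// build.sbt (hypothetical)
lazy val batchJobs = (project in file("batch-jobs"))
  .settings(CommonSettings.commonSettings: _*)
  .settings(
    name := "batch-jobs",
    // Spark is typically "provided" when the assembled jar is submitted to a cluster.
    libraryDependencies ++= CommonSettings.sparkDependencies.map(_ % "provided")
  )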
/*
 * Copyright (C) 2014-2015 by Nokia.
 * See the LICENCE.txt file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package wookie.spark

import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.StreamingContext
import wookie.{RuntimeEnvironment, Sparkle}

case class SparkRuntime(spark: SparkSession) extends RuntimeEnvironment {
  type A = SparkSession
  override val get = spark
}

case class SparkStreamingRuntime(ssc: StreamingContext) extends RuntimeEnvironment {
  type A = StreamingContext
  override val get = ssc
}

object SparkRuntime {
  def apply[A](f: SparkRuntime => A): Sparkle[A] = new Sparkle[A] {
    override def run(ctx: RuntimeEnvironment): A = f(ctx.asInstanceOf[SparkRuntime])
  }
}

object SparkStreamingRuntime {
  def apply[A](f: SparkStreamingRuntime => A): Sparkle[A] = new Sparkle[A] {
    override def run(ctx: RuntimeEnvironment): A = f(ctx.asInstanceOf[SparkStreamingRuntime])
  }
}
elyast/wookie
spark-api/src/main/scala/wookie/spark/SparkRuntime.scala
Scala
apache-2.0
1,535
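A sketch of how the SparkRuntime companion's apply might be used to build a Sparkle, based only on the signatures in the file above. Sparkle and RuntimeEnvironment come from the wookie project (assumed on the classpath), and the job body and app name here are made up.

import org.apache.spark.sql.SparkSession
import wookie.Sparkle
import wookie.spark.SparkRuntime

object RowCountJob {
  // A Sparkle[Long] that counts rows of a generated range when run with a SparkRuntime.
  val countRows: Sparkle[Long] = SparkRuntime { (rt: SparkRuntime) =>
    rt.spark.range(0, 1000).count()
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("row-count").getOrCreate()
    try println(countRows.run(SparkRuntime(spark)))
    finally spark.stop()
  }
}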
/*
 Copyright 2013 Twitter, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */

package com.twitter.summingbird.online

import com.twitter.algebird.{ Semigroup, Monoid, SummingQueue }
import com.twitter.util.{Return, Throw}
import com.twitter.summingbird.option.CacheSize
import com.twitter.summingbird.online.option.FlushFrequency

import java.util.concurrent._
import scala.collection.JavaConversions._

import com.twitter.util.Future

import scala.collection.mutable.{Set => MSet, Queue => MQueue, Map => MMap}

import org.slf4j.{LoggerFactory, Logger}

/**
 * @author Ian O Connell
 */

object SummingQueueCache {
  def builder[Key, Value](cacheSize: CacheSize, flushFrequency: FlushFrequency) =
    new CacheBuilder[Key, Value] {
      def apply(sg: Semigroup[Value]) =
        new SummingQueueCache[Key, Value](cacheSize, flushFrequency)(sg)
    }
}

case class SummingQueueCache[Key, Value](cacheSizeOpt: CacheSize, flushFrequency: FlushFrequency)
  (implicit semigroup: Semigroup[Value]) extends AsyncCache[Key, Value] {

  @transient protected lazy val logger: Logger = LoggerFactory.getLogger(getClass)

  private val cacheSize = cacheSizeOpt.size.getOrElse(0)

  private val squeue: SummingQueue[Map[Key, Value]] = SummingQueue(cacheSize)
  @volatile private var lastDump: Long = System.currentTimeMillis

  private def timedOut = (System.currentTimeMillis - lastDump) > flushFrequency.get.inMilliseconds

  def forceTick: Future[Map[Key, Value]] =
    Future.value(squeue.flush.getOrElse(Map.empty))

  def tick: Future[Map[Key, Value]] =
    if (timedOut) {
      lastDump = System.currentTimeMillis
      forceTick
    } else {
      Future.value(Map.empty)
    }

  def insert(vals: TraversableOnce[(Key, Value)]): Future[Map[Key, Value]] =
    Future.value(squeue.put(Monoid.sum(vals.map(Map(_)))).getOrElse(Map.empty))
}
surabhiiyer/summingbird
summingbird-online/src/main/scala/com/twitter/summingbird/online/SummingQueueCache.scala
Scala
apache-2.0
2,317
package org.jetbrains.jps.incremental.scala.model.impl

import org.jetbrains.jps.incremental.scala.model.JpsSbtModuleExtension
import org.jetbrains.jps.model.ex.{JpsElementBase, JpsElementChildRoleBase}
import org.jetbrains.jps.model.{JpsElement, JpsElementChildRole}

final class JpsSbtModuleExtensionImpl extends JpsElementBase[JpsSbtModuleExtensionImpl] with JpsSbtModuleExtension {

  override def createCopy(): JpsSbtModuleExtensionImpl = new JpsSbtModuleExtensionImpl

  override def applyChanges(modified: JpsSbtModuleExtensionImpl): Unit = {}
}

object JpsSbtModuleExtensionImpl {
  val Role: JpsElementChildRole[JpsSbtModuleExtension] = JpsElementChildRoleBase.create("sbt")
}
JetBrains/intellij-scala
scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/model/impl/JpsSbtModuleExtensionImpl.scala
Scala
apache-2.0
685
package edu.cmu.cs.oak.nodes

import edu.cmu.cs.oak.value.OakValue
import edu.cmu.cs.oak.value.Choice
import edu.cmu.cs.oak.value.StringValue

case class LiteralNode(sv: String, file: String, lineNr: Int) extends DNode {

  //assert(lv.location != null)

  def getChildren(): Seq[DNode] = null

  def toXml: scala.xml.Elem = {
    <Literal Text={sv} Length={sv.length.toString()} File={file} Line={lineNr.toString()} />
  }

  override def ifdefy(): List[String] = List(sv)

  override def toString(): String = sv

  override def isEmpty() = (sv == null)
}
smba/oak
edu.cmu.cs.oak/src/main/scala/edu/cmu/cs/oak/nodes/LiteralNode.scala
Scala
lgpl-3.0
563
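A tiny usage sketch for LiteralNode above, assuming the oak project (which provides the DNode trait) and scala-xml are on the classpath; the literal text, file name, and line number are made up.

import edu.cmu.cs.oak.nodes.LiteralNode

object LiteralNodeSketch {
  def main(args: Array[String]): Unit = {
    val node = LiteralNode("Hello, world!", "index.php", 3)
    println(node.toXml)    // <Literal .../> element carrying Text, Length, File and Line attributes
    println(node.ifdefy()) // List(Hello, world!)
    println(node.isEmpty)  // false (only a null payload counts as empty)
  }
}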
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.spark.sql.streaming

import org.apache.spark.TaskContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.types.StructType
import org.elasticsearch.hadoop.EsHadoopIllegalArgumentException
import org.elasticsearch.hadoop.serialization.BytesConverter
import org.elasticsearch.hadoop.serialization.JdkBytesConverter
import org.elasticsearch.hadoop.serialization.builder.ValueWriter
import org.elasticsearch.hadoop.serialization.field.FieldExtractor
import org.elasticsearch.spark.rdd.EsRDDWriter
import org.elasticsearch.spark.sql.DataFrameFieldExtractor
import org.elasticsearch.spark.sql.DataFrameValueWriter

/**
 * Takes in iterator of InternalRow objects from a partition of data, writes it to Elasticsearch, and manages
 * the streaming commit protocol.
 */
private [sql] class EsStreamQueryWriter(serializedSettings: String,
                                        schema: StructType,
                                        commitProtocol: EsCommitProtocol)
  extends EsRDDWriter[InternalRow](serializedSettings) {

  override protected def valueWriter: Class[_ <: ValueWriter[_]] = classOf[DataFrameValueWriter]
  override protected def bytesConverter: Class[_ <: BytesConverter] = classOf[JdkBytesConverter]
  override protected def fieldExtractor: Class[_ <: FieldExtractor] = classOf[DataFrameFieldExtractor]

  private val encoder: ExpressionEncoder[Row] = RowEncoder(schema).resolveAndBind()

  override def write(taskContext: TaskContext, data: Iterator[InternalRow]): Unit = {
    // Keep clients from using this method, doesn't return task commit information.
    throw new EsHadoopIllegalArgumentException("Use run(taskContext, data) instead to retrieve the commit information")
  }

  def run(taskContext: TaskContext, data: Iterator[InternalRow]): TaskCommit = {
    val taskInfo = TaskState(taskContext.partitionId(), settings.getResourceWrite)
    commitProtocol.initTask(taskInfo)
    try {
      super.write(taskContext, data)
    } catch {
      case t: Throwable =>
        commitProtocol.abortTask(taskInfo)
        throw t
    }
    commitProtocol.commitTask(taskInfo)
  }

  override protected def processData(data: Iterator[InternalRow]): Any = {
    val row = encoder.fromRow(data.next())
    commitProtocol.recordSeen()
    (row, schema)
  }
}
elastic/elasticsearch-hadoop
spark/sql-20/src/main/scala/org/elasticsearch/spark/sql/streaming/EsStreamQueryWriter.scala
Scala
apache-2.0
3,270
/*
 Copyright 2014 - 2015 Janek Bogucki

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/
package com.scalacraft.domain.v2.internal.ex

/**
 * Exception to throw when a constructor argument is null.
 *
 * This is not part of the public API because the public API does not support
 * null constructor arguments.
 */
case class NullConstructorArgumentException(paramName: String)
  extends IllegalArgumentException(paramName)
janekdb/scalacraft-domain
src/main/scala/com/scalacraft/domain/v2/internal/ex/NullConstructorArgumentException.scala
Scala
apache-2.0
954
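A hypothetical guard clause showing the intended use of NullConstructorArgumentException above; the Postcode class is invented purely for illustration.

import com.scalacraft.domain.v2.internal.ex.NullConstructorArgumentException

// Hypothetical domain class: reject null eagerly, naming the offending parameter.
class Postcode(val value: String) {
  if (value == null) throw NullConstructorArgumentException("value")
}

object PostcodeExample {
  def main(args: Array[String]): Unit = {
    try new Postcode(null)
    catch {
      case e: NullConstructorArgumentException => println(s"rejected: ${e.getMessage}")
    }
  }
}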
import sbt._
import Keys._
import play.Project._

object ApplicationBuild extends Build {

  val appName = "book_exchange"
  val appVersion = "1.0-SNAPSHOT"

  val appDependencies = Seq(
    // Add your project dependencies here,
    javaCore,
    javaJdbc,
    javaEbean,
    "org.webjars" % "webjars-play" % "2.1.0",
    "org.webjars" % "bootstrap" % "2.3.1"
  )

  val main = play.Project(appName, appVersion, appDependencies).settings(
    // Add your own project settings here
    //resolvers += "webjars" at "http://webjars.github.com/m2"
  )

}
ttaomae/book-exchange
project/Build.scala
Scala
bsd-2-clause
551
package com.socrata.datacoordinator.common

import java.security.SecureRandom

object StandardObfuscationKeyGenerator extends (() => Array[Byte]) {
  val rng = new SecureRandom

  val len = 72 /* Magic */

  def apply(): Array[Byte] = {
    val cs = new Array[Byte](len)
    rng.nextBytes(cs)
    cs
  }
}
socrata-platform/data-coordinator
coordinatorlib/src/main/scala/com/socrata/datacoordinator/common/StandardObfuscationKeyGenerator.scala
Scala
apache-2.0
305
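A small sketch of calling the generator above; the hex-encoding is only for display and is not part of the original object.

import com.socrata.datacoordinator.common.StandardObfuscationKeyGenerator

object KeyGenSketch {
  def main(args: Array[String]): Unit = {
    // The object extends () => Array[Byte], so it can be invoked like a function.
    val key: Array[Byte] = StandardObfuscationKeyGenerator()
    println(key.length)                       // 72
    println(key.map(b => f"$b%02x").mkString) // random hex, different on every run
  }
}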
package com.stulsoft.serialization

/**
  * @author Yuriy Stul.
  */
case class Message3(content: String) extends MessageTrait
ysden123/poc
pserialization/src/main/scala/com/stulsoft/serialization/Message3.scala
Scala
mit
126
package com.twitter.finagle.zipkin.core

import com.twitter.finagle.tracing._
import com.twitter.util._
import org.mockito.Mockito._
import org.scalacheck.{Gen, Arbitrary}
import org.scalatest.FunSuite
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import java.net.InetSocketAddress

class ZipkinTracerTest extends FunSuite with MockitoSugar with ScalaCheckDrivenPropertyChecks {

  test("ZipkinTracer should handle sampling") {
    val traceId = TraceId(Some(SpanId(123)), Some(SpanId(123)), SpanId(123), None)
    val underlying = mock[RawZipkinTracer]
    val tracer = new SamplingTracer(underlying, 0f)
    assert(tracer.sampleTrace(traceId) == Some(false))
    tracer.setSampleRate(1f)
    assert(tracer.sampleTrace(traceId) == Some(true))
  }

  test("ZipkinTracer should pass through trace id with sampled true despite of sample rate") {
    val underlying = mock[RawZipkinTracer]
    val tracer = new SamplingTracer(underlying, 0f)
    val id = TraceId(Some(SpanId(123)), Some(SpanId(123)), SpanId(123), Some(true))
    val record = Record(id, Time.now, Annotation.ClientSend)
    tracer.record(record)
    verify(underlying).record(record)
  }

  test("ZipkinTracer should return isActivelyTracing correctly based on sampled value") {
    val underlying = mock[RawZipkinTracer]
    val tracer = new SamplingTracer(underlying, 0f)
    val id = TraceId(Some(SpanId(123)), Some(SpanId(123)), SpanId(123), Some(true))
    val record = Record(id, Time.now, Annotation.ClientSend)

    // true when sampled is true
    assert(tracer.isActivelyTracing(id))
    // true when sampled is not set
    assert(tracer.isActivelyTracing(id.copy(_sampled = None)))
    // true when debug is set
    assert(tracer.isActivelyTracing(id.copy(_sampled = Some(false), flags = Flags(Flags.Debug))))
    // false when sampled is false
    assert(!tracer.isActivelyTracing(id.copy(_sampled = Some(false))))
  }
}

private[twitter] object ZipkinTracerTest {
  import Annotation._
  import Arbitrary.arbitrary

  val genAnnotation: Gen[Annotation] = Gen.oneOf(
    Gen.oneOf(ClientSend, ClientRecv, ServerSend, ServerRecv),
    Gen.oneOf(
      ClientSendFragment,
      ClientRecvFragment,
      ServerSendFragment,
      ServerRecvFragment
    ),
    for (s <- arbitrary[String]) yield Message(s),
    for (s <- arbitrary[String]) yield ServiceName(s),
    for (s <- arbitrary[String]) yield Rpc(s),
    Gen.oneOf(
      ClientAddr(new InetSocketAddress(0)),
      ServerAddr(new InetSocketAddress(0)),
      LocalAddr(new InetSocketAddress(0))
    ),
    // We only guarantee successful deserialization for primitive values and
    // Strings, here we test String.
    for (v <- Gen.oneOf(arbitrary[AnyVal], arbitrary[String])) yield BinaryAnnotation("k", v)
  )
}
luciferous/finagle
finagle-zipkin-core/src/test/scala/com/twitter/finagle/zipkin/core/SamplingTracerTest.scala
Scala
apache-2.0
2,817
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.log

import java.io.{File, RandomAccessFile}
import java.nio._
import java.nio.file.Paths
import java.util.Properties
import java.util.concurrent.{CountDownLatch, TimeUnit}

import kafka.common._
import kafka.server.{BrokerTopicStats, LogDirFailureChannel}
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.CorruptRecordException
import org.apache.kafka.common.record._
import org.apache.kafka.common.utils.Utils
import org.junit.Assert._
import org.junit.{After, Test}
import org.scalatest.junit.JUnitSuite

import scala.collection.JavaConverters._
import scala.collection._

/**
 * Unit tests for the log cleaning logic
 */
class LogCleanerTest extends JUnitSuite {

  val tmpdir = TestUtils.tempDir()
  val dir = TestUtils.randomPartitionLogDir(tmpdir)
  val logProps = new Properties()
  logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
  logProps.put(LogConfig.SegmentIndexBytesProp, 1024: java.lang.Integer)
  logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact)
  logProps.put(LogConfig.MessageTimestampDifferenceMaxMsProp, Long.MaxValue.toString)
  val logConfig = LogConfig(logProps)
  val time = new MockTime()
  val throttler = new Throttler(desiredRatePerSec = Double.MaxValue, checkIntervalMs = Long.MaxValue, time = time)

  @After
  def teardown(): Unit = {
    Utils.delete(tmpdir)
  }

  /**
   * Test simple log cleaning
   */
  @Test
  def testCleanSegments(): Unit = {
    val cleaner = makeCleaner(Int.MaxValue)
    val logProps = new Properties()
    logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)

    val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))

    // append messages to the log until we have four segments
    while(log.numberOfSegments < 4)
      log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0)
    val keysFound = LogTest.keysInLog(log)
    assertEquals(0L until log.logEndOffset, keysFound)

    // pretend we have the following keys
    val keys = immutable.ListSet(1L, 3L, 5L, 7L, 9L)
    val map = new FakeOffsetMap(Int.MaxValue)
    keys.foreach(k => map.put(key(k), Long.MaxValue))

    // clean the log
    val segments = log.logSegments.take(3).toSeq
    val stats = new CleanerStats()
    val expectedBytesRead = segments.map(_.size).sum
    cleaner.cleanSegments(log, segments, map, 0L, stats)
    val shouldRemain = LogTest.keysInLog(log).filter(!keys.contains(_))
    assertEquals(shouldRemain, LogTest.keysInLog(log))
    assertEquals(expectedBytesRead, stats.bytesRead)
  }

  @Test
  def testCleanSegmentsWithConcurrentSegmentDeletion(): Unit = {
    val deleteStartLatch = new CountDownLatch(1)
    val deleteCompleteLatch = new CountDownLatch(1)

    // Construct a log instance.
The replaceSegments() method of the log instance is overridden so that // it waits for another thread to execute deleteOldSegments() val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024 : java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact + "," + LogConfig.Delete) val topicPartition = Log.parseTopicPartitionName(dir) val producerStateManager = new ProducerStateManager(topicPartition, dir) val log = new Log(dir, config = LogConfig.fromProps(logConfig.originals, logProps), logStartOffset = 0L, recoveryPoint = 0L, scheduler = time.scheduler, brokerTopicStats = new BrokerTopicStats, time, maxProducerIdExpirationMs = 60 * 60 * 1000, producerIdExpirationCheckIntervalMs = LogManager.ProducerIdExpirationCheckIntervalMs, topicPartition = topicPartition, producerStateManager = producerStateManager, logDirFailureChannel = new LogDirFailureChannel(10)) { override def replaceSegments(newSegments: Seq[LogSegment], oldSegments: Seq[LogSegment], isRecoveredSwapFile: Boolean = false): Unit = { deleteStartLatch.countDown() if (!deleteCompleteLatch.await(5000, TimeUnit.MILLISECONDS)) { throw new IllegalStateException("Log segment deletion timed out") } super.replaceSegments(newSegments, oldSegments, isRecoveredSwapFile) } } // Start a thread which execute log.deleteOldSegments() right before replaceSegments() is executed val t = new Thread() { override def run(): Unit = { deleteStartLatch.await(5000, TimeUnit.MILLISECONDS) log.maybeIncrementLogStartOffset(log.activeSegment.baseOffset) log.onHighWatermarkIncremented(log.activeSegment.baseOffset) log.deleteOldSegments() deleteCompleteLatch.countDown() } } t.start() // Append records so that segment number increase to 3 while (log.numberOfSegments < 3) { log.appendAsLeader(record(key = 0, log.logEndOffset.toInt), leaderEpoch = 0) log.roll() } assertEquals(3, log.numberOfSegments) // Remember reference to the first log and determine its file name expected for async deletion val firstLogFile = log.logSegments.head.log val expectedFileName = CoreUtils.replaceSuffix(firstLogFile.file.getPath, "", Log.DeletedFileSuffix) // Clean the log. 
This should trigger replaceSegments() and deleteOldSegments(); val offsetMap = new FakeOffsetMap(Int.MaxValue) val cleaner = makeCleaner(Int.MaxValue) val segments = log.logSegments(0, log.activeSegment.baseOffset).toSeq val stats = new CleanerStats() cleaner.buildOffsetMap(log, 0, log.activeSegment.baseOffset, offsetMap, stats) cleaner.cleanSegments(log, segments, offsetMap, 0L, stats) // Validate based on the file name that log segment file is renamed exactly once for async deletion assertEquals(expectedFileName, firstLogFile.file().getPath) assertEquals(2, log.numberOfSegments) } @Test def testSizeTrimmedForPreallocatedAndCompactedTopic(): Unit = { val originalMaxFileSize = 1024; val cleaner = makeCleaner(2) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, originalMaxFileSize: java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, "compact": java.lang.String) logProps.put(LogConfig.PreAllocateEnableProp, "true": java.lang.String) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() // clean the log with only one message removed cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertTrue("Cleaned segment file should be trimmed to its real size.", log.logSegments.iterator.next.log.channel().size() < originalMaxFileSize) } @Test def testDuplicateCheckAfterCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 2048: java.lang.Integer) var log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val pid1 = 1 val pid2 = 2 val pid3 = 3 val pid4 = 4 appendIdempotentAsLeader(log, pid1, producerEpoch)(Seq(1, 2, 3)) appendIdempotentAsLeader(log, pid2, producerEpoch)(Seq(3, 1, 4)) appendIdempotentAsLeader(log, pid3, producerEpoch)(Seq(1, 4)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(2, 5, 7), lastOffsetsPerBatchInLog(log)) assertEquals(Map(pid1 -> 2, pid2 -> 2, pid3 -> 1), lastSequencesInLog(log)) assertEquals(List(2, 3, 1, 4), LogTest.keysInLog(log)) assertEquals(List(1, 3, 6, 7), offsetsInLog(log)) // we have to reload the log to validate that the cleaner maintained sequence numbers correctly def reloadLog(): Unit = { log.close() log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps), recoveryPoint = 0L) } reloadLog() // check duplicate append from producer 1 var logAppendInfo = appendIdempotentAsLeader(log, pid1, producerEpoch)(Seq(1, 2, 3)) assertEquals(0L, logAppendInfo.firstOffset.get) assertEquals(2L, logAppendInfo.lastOffset) // check duplicate append from producer 3 logAppendInfo = appendIdempotentAsLeader(log, pid3, producerEpoch)(Seq(1, 4)) assertEquals(6L, logAppendInfo.firstOffset.get) assertEquals(7L, logAppendInfo.lastOffset) // check duplicate append from producer 2 logAppendInfo = appendIdempotentAsLeader(log, pid2, producerEpoch)(Seq(3, 1, 4)) assertEquals(3L, logAppendInfo.firstOffset.get) assertEquals(5L, logAppendInfo.lastOffset) // do one more append and a round of cleaning to 
force another deletion from producer 1's batch appendIdempotentAsLeader(log, pid4, producerEpoch)(Seq(2)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(Map(pid1 -> 2, pid2 -> 2, pid3 -> 1, pid4 -> 0), lastSequencesInLog(log)) assertEquals(List(2, 5, 7, 8), lastOffsetsPerBatchInLog(log)) assertEquals(List(3, 1, 4, 2), LogTest.keysInLog(log)) assertEquals(List(3, 6, 7, 8), offsetsInLog(log)) reloadLog() // duplicate append from producer1 should still be fine logAppendInfo = appendIdempotentAsLeader(log, pid1, producerEpoch)(Seq(1, 2, 3)) assertEquals(0L, logAppendInfo.firstOffset.get) assertEquals(2L, logAppendInfo.lastOffset) } @Test def testBasicTransactionAwareCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 2048: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val pid1 = 1 val pid2 = 2 val appendProducer1 = appendTransactionalAsLeader(log, pid1, producerEpoch) val appendProducer2 = appendTransactionalAsLeader(log, pid2, producerEpoch) appendProducer1(Seq(1, 2)) appendProducer2(Seq(2, 3)) appendProducer1(Seq(3, 4)) log.appendAsLeader(abortMarker(pid1, producerEpoch), leaderEpoch = 0, isFromClient = false) log.appendAsLeader(commitMarker(pid2, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer1(Seq(2)) log.appendAsLeader(commitMarker(pid1, producerEpoch), leaderEpoch = 0, isFromClient = false) val abortedTransactions = log.collectAbortedTransactions(log.logStartOffset, log.logEndOffset) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3, 2), LogTest.keysInLog(log)) assertEquals(List(3, 6, 7, 8, 9), offsetsInLog(log)) // ensure the transaction index is still correct assertEquals(abortedTransactions, log.collectAbortedTransactions(log.logStartOffset, log.logEndOffset)) } @Test def testCleanWithTransactionsSpanningSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val pid1 = 1 val pid2 = 2 val pid3 = 3 val appendProducer1 = appendTransactionalAsLeader(log, pid1, producerEpoch) val appendProducer2 = appendTransactionalAsLeader(log, pid2, producerEpoch) val appendProducer3 = appendTransactionalAsLeader(log, pid3, producerEpoch) appendProducer1(Seq(1, 2)) appendProducer3(Seq(2, 3)) appendProducer2(Seq(3, 4)) log.roll() appendProducer2(Seq(5, 6)) appendProducer3(Seq(6, 7)) appendProducer1(Seq(7, 8)) log.appendAsLeader(abortMarker(pid2, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer3(Seq(8, 9)) log.appendAsLeader(commitMarker(pid3, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer1(Seq(9, 10)) log.appendAsLeader(abortMarker(pid1, producerEpoch), leaderEpoch = 0, isFromClient = false) // we have only cleaned the records in the first segment val dirtyOffset = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset))._1 assertEquals(List(2, 3, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10), LogTest.keysInLog(log)) log.roll() // append a couple extra segments in the new segment to ensure we have sequence numbers appendProducer2(Seq(11)) appendProducer1(Seq(12)) // finally only the keys from pid3 
should remain cleaner.clean(LogToClean(new TopicPartition("test", 0), log, dirtyOffset, log.activeSegment.baseOffset)) assertEquals(List(2, 3, 6, 7, 8, 9, 11, 12), LogTest.keysInLog(log)) } @Test def testCommitMarkerRemoval(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) appendProducer(Seq(2, 3)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer(Seq(2)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() // cannot remove the marker in this pass because there are still valid records var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(1, 3, 2), LogTest.keysInLog(log)) assertEquals(List(0, 2, 3, 4, 5), offsetsInLog(log)) appendProducer(Seq(1, 3)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() // the first cleaning preserves the commit marker (at offset 3) since there were still records for the transaction dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 1, 3), LogTest.keysInLog(log)) assertEquals(List(3, 4, 5, 6, 7, 8), offsetsInLog(log)) // delete horizon forced to 0 to verify marker is not removed early dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = 0L)._1 assertEquals(List(2, 1, 3), LogTest.keysInLog(log)) assertEquals(List(3, 4, 5, 6, 7, 8), offsetsInLog(log)) // clean again with large delete horizon and verify the marker is removed dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 1, 3), LogTest.keysInLog(log)) assertEquals(List(4, 5, 6, 7, 8), offsetsInLog(log)) } /** * Tests log cleaning with batches that are deleted where no additional messages * are available to read in the buffer. Cleaning should continue from the next offset. 
*/ @Test def testDeletedBatchesWithNoMessagesRead(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(capacity = Int.MaxValue, maxMessageSize = 100) val logProps = new Properties() logProps.put(LogConfig.MaxMessageBytesProp, 100: java.lang.Integer) logProps.put(LogConfig.SegmentBytesProp, 1000: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer(Seq(2)) appendProducer(Seq(2)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = Long.MaxValue) assertEquals(List(2), LogTest.keysInLog(log)) assertEquals(List(1, 3, 4), offsetsInLog(log)) cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = Long.MaxValue) assertEquals(List(2), LogTest.keysInLog(log)) assertEquals(List(3, 4), offsetsInLog(log)) } @Test def testCommitMarkerRetentionWithEmptyBatch(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(2, 3)) // batch last offset is 1 log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() log.appendAsLeader(record(2, 2), leaderEpoch = 0) log.appendAsLeader(record(3, 3), leaderEpoch = 0) log.roll() // first time through the records are removed var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 3), LogTest.keysInLog(log)) assertEquals(List(2, 3, 4), offsetsInLog(log)) // commit marker is retained assertEquals(List(1, 2, 3, 4), lastOffsetsPerBatchInLog(log)) // empty batch is retained // the empty batch remains if cleaned again because it still holds the last sequence dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 3), LogTest.keysInLog(log)) assertEquals(List(2, 3, 4), offsetsInLog(log)) // commit marker is still retained assertEquals(List(1, 2, 3, 4), lastOffsetsPerBatchInLog(log)) // empty batch is retained // append a new record from the producer to allow cleaning of the empty batch appendProducer(Seq(1)) log.roll() dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 3, 1), LogTest.keysInLog(log)) assertEquals(List(2, 3, 4, 5), offsetsInLog(log)) // commit marker is still retained assertEquals(List(2, 3, 4, 5), lastOffsetsPerBatchInLog(log)) // empty batch should be gone dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(2, 3, 1), LogTest.keysInLog(log)) assertEquals(List(3, 4, 5), offsetsInLog(log)) // commit marker is gone assertEquals(List(3, 4, 5), lastOffsetsPerBatchInLog(log)) // empty batch is gone } @Test def testAbortMarkerRemoval(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val 
logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) appendProducer(Seq(2, 3)) log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) appendProducer(Seq(3)) log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() // delete horizon set to 0 to verify marker is not removed early val dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = 0L)._1 assertEquals(List(3), LogTest.keysInLog(log)) assertEquals(List(3, 4, 5), offsetsInLog(log)) // clean again with large delete horizon and verify the marker is removed cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue) assertEquals(List(3), LogTest.keysInLog(log)) assertEquals(List(4, 5), offsetsInLog(log)) } @Test def testAbortMarkerRetentionWithEmptyBatch(): Unit = { val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(2, 3)) // batch last offset is 1 log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() def assertAbortedTransactionIndexed(): Unit = { val abortedTxns = log.collectAbortedTransactions(0L, 100L) assertEquals(1, abortedTxns.size) assertEquals(producerId, abortedTxns.head.producerId) assertEquals(0, abortedTxns.head.firstOffset) assertEquals(2, abortedTxns.head.lastOffset) } assertAbortedTransactionIndexed() // first time through the records are removed var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, 100L), deleteHorizonMs = Long.MaxValue)._1 assertAbortedTransactionIndexed() assertEquals(List(), LogTest.keysInLog(log)) assertEquals(List(2), offsetsInLog(log)) // abort marker is retained assertEquals(List(1, 2), lastOffsetsPerBatchInLog(log)) // empty batch is retained // the empty batch remains if cleaned again because it still holds the last sequence dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertAbortedTransactionIndexed() assertEquals(List(), LogTest.keysInLog(log)) assertEquals(List(2), offsetsInLog(log)) // abort marker is still retained assertEquals(List(1, 2), lastOffsetsPerBatchInLog(log)) // empty batch is retained // now update the last sequence so that the empty batch can be removed appendProducer(Seq(1)) log.roll() dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertAbortedTransactionIndexed() assertEquals(List(1), LogTest.keysInLog(log)) assertEquals(List(2, 3), offsetsInLog(log)) // abort marker is not yet gone because we read the empty batch assertEquals(List(2, 3), lastOffsetsPerBatchInLog(log)) // but we do not preserve the empty batch dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, 100L), deleteHorizonMs = Long.MaxValue)._1 assertEquals(List(1), LogTest.keysInLog(log)) assertEquals(List(3), offsetsInLog(log)) // abort marker is gone 
assertEquals(List(3), lastOffsetsPerBatchInLog(log)) // we do not bother retaining the aborted transaction in the index assertEquals(0, log.collectAbortedTransactions(0L, 100L).size) } /** * Test log cleaning with logs containing messages larger than default message size */ @Test def testLargeMessage() { val largeMessageSize = 1024 * 1024 // Create cleaner with very small default max message size val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, largeMessageSize * 16: java.lang.Integer) logProps.put(LogConfig.MaxMessageBytesProp, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while(log.numberOfSegments < 2) log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), leaderEpoch = 0) val keysFound = LogTest.keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) // pretend we have the following keys val keys = immutable.ListSet(1L, 3L, 5L, 7L, 9L) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) // clean the log val stats = new CleanerStats() cleaner.cleanSegments(log, Seq(log.logSegments.head), map, 0L, stats) val shouldRemain = LogTest.keysInLog(log).filter(!keys.contains(_)) assertEquals(shouldRemain, LogTest.keysInLog(log)) } /** * Test log cleaning with logs containing messages larger than topic's max message size */ @Test def testMessageLargerThanMaxMessageSize() { val (log, offsetMap) = createLogWithMessagesLargerThanMaxSize(largeMessageSize = 1024 * 1024) val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) cleaner.cleanSegments(log, Seq(log.logSegments.head), offsetMap, 0L, new CleanerStats) val shouldRemain = LogTest.keysInLog(log).filter(k => !offsetMap.map.containsKey(k.toString)) assertEquals(shouldRemain, LogTest.keysInLog(log)) } /** * Test log cleaning with logs containing messages larger than topic's max message size * where header is corrupt */ @Test def testMessageLargerThanMaxMessageSizeWithCorruptHeader() { val (log, offsetMap) = createLogWithMessagesLargerThanMaxSize(largeMessageSize = 1024 * 1024) val file = new RandomAccessFile(log.logSegments.head.log.file, "rw") file.seek(Records.MAGIC_OFFSET) file.write(0xff) file.close() val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) intercept[CorruptRecordException] { cleaner.cleanSegments(log, Seq(log.logSegments.head), offsetMap, 0L, new CleanerStats) } } /** * Test log cleaning with logs containing messages larger than topic's max message size * where message size is corrupt and larger than bytes available in log segment. 
*/ @Test def testCorruptMessageSizeLargerThanBytesAvailable() { val (log, offsetMap) = createLogWithMessagesLargerThanMaxSize(largeMessageSize = 1024 * 1024) val file = new RandomAccessFile(log.logSegments.head.log.file, "rw") file.setLength(1024) file.close() val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) intercept[CorruptRecordException] { cleaner.cleanSegments(log, Seq(log.logSegments.head), offsetMap, 0L, new CleanerStats) } } def createLogWithMessagesLargerThanMaxSize(largeMessageSize: Int): (Log, FakeOffsetMap) = { val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, largeMessageSize * 16: java.lang.Integer) logProps.put(LogConfig.MaxMessageBytesProp, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while(log.numberOfSegments < 2) log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), leaderEpoch = 0) val keysFound = LogTest.keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) // Decrease the log's max message size logProps.put(LogConfig.MaxMessageBytesProp, largeMessageSize / 2: java.lang.Integer) log.config = LogConfig.fromProps(logConfig.originals, logProps) // pretend we have the following keys val keys = immutable.ListSet(1, 3, 5, 7, 9) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) (log, map) } @Test def testCleaningWithDeletes(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages with the keys 0 through N while(log.numberOfSegments < 2) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) // delete all even keys between 0 and N val leo = log.logEndOffset for(key <- 0 until leo.toInt by 2) log.appendAsLeader(tombstoneRecord(key), leaderEpoch = 0) // append some new unique keys to pad out to a new active segment while(log.numberOfSegments < 4) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) val keys = LogTest.keysInLog(log).toSet assertTrue("None of the keys we deleted should still exist.", (0 until leo.toInt by 2).forall(!keys.contains(_))) } def testLogCleanerStats(): Unit = { // because loadFactor is 0.75, this means we can fit 2 messages in the map val cleaner = makeCleaner(2) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() val initialLogSize = log.size val (endOffset, stats) = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals(5, endOffset) assertEquals(5, stats.messagesRead) assertEquals(initialLogSize, stats.bytesRead) assertEquals(2, stats.messagesWritten) assertEquals(log.size, stats.bytesWritten) assertEquals(0, stats.invalidMessagesRead) 
assertTrue(stats.endTime >= stats.startTime) } @Test def testLogCleanerRetainsProducerLastSequence(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0, 0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(0, 1, producerId = 1, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0, 2, producerId = 2, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 2 log.appendAsLeader(record(0, 3, producerId = 3, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(1, 1, producerId = 2, producerEpoch = 0, sequence = 1), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(1, 3, 4), lastOffsetsPerBatchInLog(log)) assertEquals(Map(1L -> 0, 2L -> 1, 3L -> 0), lastSequencesInLog(log)) assertEquals(List(0, 1), LogTest.keysInLog(log)) assertEquals(List(3, 4), offsetsInLog(log)) } @Test def testLogCleanerRetainsLastSequenceEvenIfTransactionAborted(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) appendProducer(Seq(2, 3)) log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, isFromClient = false) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(2, 3), lastOffsetsPerBatchInLog(log)) assertEquals(Map(producerId -> 2), lastSequencesInLog(log)) assertEquals(List(), LogTest.keysInLog(log)) assertEquals(List(3), offsetsInLog(log)) // Append a new entry from the producer and verify that the empty batch is cleaned up appendProducer(Seq(1, 5)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3, 5), lastOffsetsPerBatchInLog(log)) assertEquals(Map(producerId -> 4), lastSequencesInLog(log)) assertEquals(List(1, 5), LogTest.keysInLog(log)) assertEquals(List(3, 4, 5), offsetsInLog(log)) } @Test def testPartialSegmentClean(): Unit = { // because loadFactor is 0.75, this means we can fit 2 messages in the map val cleaner = makeCleaner(2) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() // clean the log with only one message removed cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals(List(1,0,1,0), LogTest.keysInLog(log)) assertEquals(List(1,2,3,4), offsetsInLog(log)) // continue to make progress, even though we can only clean one message at 
a time cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 3, log.activeSegment.baseOffset)) assertEquals(List(0,1,0), LogTest.keysInLog(log)) assertEquals(List(2,3,4), offsetsInLog(log)) cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 4, log.activeSegment.baseOffset)) assertEquals(List(1,0), LogTest.keysInLog(log)) assertEquals(List(3,4), offsetsInLog(log)) } @Test def testCleaningWithUncleanableSection(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // Number of distinct keys. For an effective test this should be small enough such that each log segment contains some duplicates. val N = 10 val numCleanableSegments = 2 val numTotalSegments = 7 // append messages with the keys 0 through N-1, values equal offset while(log.numberOfSegments <= numCleanableSegments) log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), leaderEpoch = 0) // at this point one message past the cleanable segments has been added // the entire segment containing the first uncleanable offset should not be cleaned. val firstUncleanableOffset = log.logEndOffset + 1 // +1 so it is past the baseOffset while(log.numberOfSegments < numTotalSegments - 1) log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), leaderEpoch = 0) // the last (active) segment has just one message def distinctValuesBySegment = log.logSegments.map(s => s.log.records.asScala.map(record => TestUtils.readString(record.value)).toSet.size).toSeq val distinctValuesBySegmentBeforeClean = distinctValuesBySegment assertTrue("Test is not effective unless each segment contains duplicates. 
Increase segment size or decrease number of keys.", distinctValuesBySegment.reverse.tail.forall(_ > N)) cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, firstUncleanableOffset)) val distinctValuesBySegmentAfterClean = distinctValuesBySegment assertTrue("The cleanable segments should have fewer distinct values after cleaning", distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean).take(numCleanableSegments).forall { case (before, after) => after < before }) assertTrue("The uncleanable segments should have the same number of values after cleaning", distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) .slice(numCleanableSegments, numTotalSegments).forall { x => x._1 == x._2 }) } @Test def testLogToClean(): Unit = { // create a log with small segment size val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) log.appendAsLeader(createRecords, leaderEpoch = 0) val logToClean = LogToClean(new TopicPartition("test", 0), log, log.activeSegment.baseOffset, log.activeSegment.baseOffset) assertEquals("Total bytes of LogToClean should equal size of all segments excluding the active segment", logToClean.totalBytes, log.size - log.activeSegment.size) } @Test def testLogToCleanWithUncleanableSection(): Unit = { // create a log with small segment size val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) log.appendAsLeader(createRecords, leaderEpoch = 0) // segments [0,1] are clean; segments [2, 3] are cleanable; segments [4,5] are uncleanable val segs = log.logSegments.toSeq val logToClean = LogToClean(new TopicPartition("test", 0), log, segs(2).baseOffset, segs(4).baseOffset) val expectedCleanSize = segs.take(2).map(_.size).sum val expectedCleanableSize = segs.slice(2, 4).map(_.size).sum assertEquals("Uncleanable bytes of LogToClean should equal size of all segments prior to the one containing the first dirty offset", logToClean.cleanBytes, expectedCleanSize) assertEquals("Cleanable bytes of LogToClean should equal size of all segments from the one containing the first dirty offset" + " to the segment prior to the one with the first uncleanable offset", logToClean.cleanableBytes, expectedCleanableSize) assertEquals("Total bytes should be the sum of the clean and cleanable segments", logToClean.totalBytes, expectedCleanSize + expectedCleanableSize) assertEquals("Total cleanable ratio should be the ratio of cleanable size to clean plus cleanable", logToClean.cleanableRatio, expectedCleanableSize / (expectedCleanSize + expectedCleanableSize).toDouble, 1.0e-6d) } @Test def testCleaningWithUnkeyedMessages(): Unit = { val cleaner = makeCleaner(Int.MaxValue) // create a log with compaction turned off so we can append unkeyed messages val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Delete) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) 
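    // The cleaner can only compact by key, so records without keys cannot be retained by compaction;
    // they are counted as invalid reads and dropped, which is exactly what the assertions below verify.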
// append unkeyed messages while(log.numberOfSegments < 2) log.appendAsLeader(unkeyedRecord(log.logEndOffset.toInt), leaderEpoch = 0) val numInvalidMessages = unkeyedMessageCountInLog(log) val sizeWithUnkeyedMessages = log.size // append keyed messages while(log.numberOfSegments < 3) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val expectedSizeAfterCleaning = log.size - sizeWithUnkeyedMessages val (_, stats) = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) assertEquals("Log should only contain keyed messages after cleaning.", 0, unkeyedMessageCountInLog(log)) assertEquals("Log should only contain keyed messages after cleaning.", expectedSizeAfterCleaning, log.size) assertEquals("Cleaner should have seen %d invalid messages.", numInvalidMessages, stats.invalidMessagesRead) } def lastOffsetsPerBatchInLog(log: Log): Iterable[Long] = { for (segment <- log.logSegments; batch <- segment.log.batches.asScala) yield batch.lastOffset } def lastSequencesInLog(log: Log): Map[Long, Int] = { (for (segment <- log.logSegments; batch <- segment.log.batches.asScala if !batch.isControlBatch && batch.hasProducerId) yield batch.producerId -> batch.lastSequence).toMap } /* extract all the offsets from a log */ def offsetsInLog(log: Log): Iterable[Long] = log.logSegments.flatMap(s => s.log.records.asScala.filter(_.hasValue).filter(_.hasKey).map(m => m.offset)) def unkeyedMessageCountInLog(log: Log) = log.logSegments.map(s => s.log.records.asScala.filter(_.hasValue).count(m => !m.hasKey)).sum def abortCheckDone(topicPartition: TopicPartition): Unit = { throw new LogCleaningAbortedException() } /** * Test that abortion during cleaning throws a LogCleaningAbortedException */ @Test def testCleanSegmentsWithAbort(): Unit = { val cleaner = makeCleaner(Int.MaxValue, abortCheckDone) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages to the log until we have four segments while(log.numberOfSegments < 4) log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val keys = LogTest.keysInLog(log) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) intercept[LogCleaningAbortedException] { cleaner.cleanSegments(log, log.logSegments.take(3).toSeq, map, 0L, new CleanerStats()) } } /** * Validate the logic for grouping log segments together for cleaning */ @Test def testSegmentGrouping(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 300: java.lang.Integer) logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append some messages to the log var i = 0 while(log.numberOfSegments < 10) { log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) i += 1 } // grouping by very large values should result in a single group with all the segments in it var groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(1, groups.size) assertEquals(log.numberOfSegments, groups.head.size) checkSegmentOrder(groups) // grouping by very small values should result in all groups having one entry groups = 
cleaner.groupSegmentsBySize(log.logSegments, maxSize = 1, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(log.numberOfSegments, groups.size) assertTrue("All groups should be singletons.", groups.forall(_.size == 1)) checkSegmentOrder(groups) groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = 1, log.logEndOffset) assertEquals(log.numberOfSegments, groups.size) assertTrue("All groups should be singletons.", groups.forall(_.size == 1)) checkSegmentOrder(groups) val groupSize = 3 // check grouping by log size val logSize = log.logSegments.take(groupSize).map(_.size).sum.toInt + 1 groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = logSize, maxIndexSize = Int.MaxValue, log.logEndOffset) checkSegmentOrder(groups) assertTrue("All but the last group should be the target size.", groups.dropRight(1).forall(_.size == groupSize)) // check grouping by index size val indexSize = log.logSegments.take(groupSize).map(_.offsetIndex.sizeInBytes).sum + 1 groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = indexSize, log.logEndOffset) checkSegmentOrder(groups) assertTrue("All but the last group should be the target size.", groups.dropRight(1).forall(_.size == groupSize)) } /** * Validate the logic for grouping log segments together for cleaning when only a small number of * messages are retained, but the range of offsets is greater than Int.MaxValue. A group should not * contain a range of offsets greater than Int.MaxValue to ensure that relative offsets can be * stored in 4 bytes. */ @Test def testSegmentGroupingWithSparseOffsets(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 400: java.lang.Integer) logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // fill up first segment while (log.numberOfSegments == 1) log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) // forward offset and append message to next segment at offset Int.MaxValue val records = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue - 1) log.appendAsFollower(records) log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) assertEquals(Int.MaxValue, log.activeSegment.offsetIndex.lastOffset) // grouping should result in a single group with maximum relative offset of Int.MaxValue var groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(1, groups.size) // append another message, making last offset of second segment > Int.MaxValue log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) // grouping should not group the two segments to ensure that maximum relative offset in each group <= Int.MaxValue groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(2, groups.size) checkSegmentOrder(groups) // append more messages, creating new segments, further grouping should still occur while (log.numberOfSegments < 4) log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = 
Int.MaxValue, log.logEndOffset) assertEquals(log.numberOfSegments - 1, groups.size) for (group <- groups) assertTrue("Relative offset greater than Int.MaxValue", group.last.offsetIndex.lastOffset - group.head.offsetIndex.baseOffset <= Int.MaxValue) checkSegmentOrder(groups) } /** * Following the loading of a log segment where the index file is zero sized, * the index returned would be the base offset. Sometimes the log file would * contain data with offsets in excess of the baseOffset which would cause * the log cleaner to group together segments with a range of > Int.MaxValue * this test replicates that scenario to ensure that the segments are grouped * correctly. */ @Test def testSegmentGroupingFollowingLoadOfZeroIndex(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 400: java.lang.Integer) //mimic the effect of loading an empty index file logProps.put(LogConfig.IndexIntervalBytesProp, 400: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val record1 = messageWithOffset("hello".getBytes, "hello".getBytes, 0) log.appendAsFollower(record1) val record2 = messageWithOffset("hello".getBytes, "hello".getBytes, 1) log.appendAsFollower(record2) log.roll(Int.MaxValue/2) // starting a new log segment at offset Int.MaxValue/2 val record3 = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue/2) log.appendAsFollower(record3) val record4 = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue.toLong + 1) log.appendAsFollower(record4) assertTrue("Actual offset range should be > Int.MaxValue", log.logEndOffset - 1 - log.logStartOffset > Int.MaxValue) assertTrue("index.lastOffset is reporting the wrong last offset", log.logSegments.last.offsetIndex.lastOffset - log.logStartOffset <= Int.MaxValue) // grouping should result in two groups because the second segment takes the offset range > MaxInt val groups = cleaner.groupSegmentsBySize(log.logSegments, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(2, groups.size) for (group <- groups) assertTrue("Relative offset greater than Int.MaxValue", group.last.readNextOffset - 1 - group.head.baseOffset <= Int.MaxValue) checkSegmentOrder(groups) } private def checkSegmentOrder(groups: Seq[Seq[LogSegment]]): Unit = { val offsets = groups.flatMap(_.map(_.baseOffset)) assertEquals("Offsets should be in increasing order.", offsets.sorted, offsets) } /** * Test building an offset map off the log */ @Test def testBuildOffsetMap(): Unit = { val map = new FakeOffsetMap(1000) val log = makeLog() val cleaner = makeCleaner(Int.MaxValue) val start = 0 val end = 500 writeToLog(log, (start until end) zip (start until end)) def checkRange(map: FakeOffsetMap, start: Int, end: Int) { val stats = new CleanerStats() cleaner.buildOffsetMap(log, start, end, map, stats) val endOffset = map.latestOffset + 1 assertEquals("Last offset should be the end offset.", end, endOffset) assertEquals("Should have the expected number of messages in the map.", end-start, map.size) for(i <- start until end) assertEquals("Should find all the keys", i.toLong, map.get(key(i))) assertEquals("Should not find a value too small", -1L, map.get(key(start - 1))) assertEquals("Should not find a value too large", -1L, map.get(key(end))) assertEquals(end - start, stats.mapMessagesRead) } val segments = log.logSegments.toSeq checkRange(map, 0, segments(1).baseOffset.toInt) checkRange(map, segments(1).baseOffset.toInt, 
segments(3).baseOffset.toInt) checkRange(map, segments(3).baseOffset.toInt, log.logEndOffset.toInt) } @Test def testSegmentWithOffsetOverflow(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) logProps.put(LogConfig.FileDeleteDelayMsProp, 1000: java.lang.Integer) val config = LogConfig.fromProps(logConfig.originals, logProps) LogTest.initializeLogDirWithOverflowedSegment(dir) val log = makeLog(config = config, recoveryPoint = Long.MaxValue) val segmentWithOverflow = LogTest.firstOverflowSegment(log).getOrElse { fail("Failed to create log with a segment which has overflowed offsets") } val numSegmentsInitial = log.logSegments.size val allKeys = LogTest.keysInLog(log).toList val expectedKeysAfterCleaning = mutable.MutableList[Long]() // pretend we want to clean every alternate key val offsetMap = new FakeOffsetMap(Int.MaxValue) for (k <- 1 until allKeys.size by 2) { expectedKeysAfterCleaning += allKeys(k - 1) offsetMap.put(key(allKeys(k)), Long.MaxValue) } // Try to clean segment with offset overflow. This will trigger log split and the cleaning itself must abort. assertThrows[LogCleaningAbortedException] { cleaner.cleanSegments(log, List(segmentWithOverflow), offsetMap, 0L, new CleanerStats()) } assertEquals(numSegmentsInitial + 1, log.logSegments.size) assertEquals(allKeys, LogTest.keysInLog(log)) assertFalse(LogTest.hasOffsetOverflow(log)) // Clean each segment now that split is complete. for (segmentToClean <- log.logSegments) cleaner.cleanSegments(log, List(segmentToClean), offsetMap, 0L, new CleanerStats()) assertEquals(expectedKeysAfterCleaning, LogTest.keysInLog(log)) assertFalse(LogTest.hasOffsetOverflow(log)) log.close() } /** * Tests recovery if broker crashes at the following stages during the cleaning sequence * <ol> * <li> Cleaner has created .cleaned log containing multiple segments, swap sequence not yet started * <li> .cleaned log renamed to .swap, old segment files not yet renamed to .deleted * <li> .cleaned log renamed to .swap, old segment files renamed to .deleted, but not yet deleted * <li> .swap suffix removed, completing the swap, but async delete of .deleted files not yet complete * </ol> */ @Test def testRecoveryAfterCrash(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 300: java.lang.Integer) logProps.put(LogConfig.IndexIntervalBytesProp, 1: java.lang.Integer) logProps.put(LogConfig.FileDeleteDelayMsProp, 10: java.lang.Integer) val config = LogConfig.fromProps(logConfig.originals, logProps) // create a log and append some messages var log = makeLog(config = config) var messageCount = 0 while (log.numberOfSegments < 10) { log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } val allKeys = LogTest.keysInLog(log) // pretend we have odd-numbered keys val offsetMap = new FakeOffsetMap(Int.MaxValue) for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) // clean the log cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) // clear scheduler so that async deletes don't run time.scheduler.clear() var cleanedKeys = LogTest.keysInLog(log) log.close() // 1) Simulate recovery just after .cleaned file is created, before rename to .swap // On recovery, clean operation is aborted. 
All messages should be present in the log log.logSegments.head.changeFileSuffixes("", Log.CleanedFileSuffix) for (file <- dir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, ""))) } log = recoverAndCheck(config, allKeys) // clean again cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) // clear scheduler so that async deletes don't run time.scheduler.clear() cleanedKeys = LogTest.keysInLog(log) log.close() // 2) Simulate recovery just after swap file is created, before old segment files are // renamed to .deleted. Clean operation is resumed during recovery. log.logSegments.head.changeFileSuffixes("", Log.SwapFileSuffix) for (file <- dir.listFiles if file.getName.endsWith(Log.DeletedFileSuffix)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(CoreUtils.replaceSuffix(file.getPath, Log.DeletedFileSuffix, ""))) } log = recoverAndCheck(config, cleanedKeys) // add some more messages and clean the log again while (log.numberOfSegments < 10) { log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) // clear scheduler so that async deletes don't run time.scheduler.clear() cleanedKeys = LogTest.keysInLog(log) // 3) Simulate recovery after swap file is created and old segments files are renamed // to .deleted. Clean operation is resumed during recovery. log.logSegments.head.changeFileSuffixes("", Log.SwapFileSuffix) log = recoverAndCheck(config, cleanedKeys) // add some more messages and clean the log again while (log.numberOfSegments < 10) { log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) cleaner.cleanSegments(log, log.logSegments.take(9).toSeq, offsetMap, 0L, new CleanerStats()) // clear scheduler so that async deletes don't run time.scheduler.clear() cleanedKeys = LogTest.keysInLog(log) log.close() // 4) Simulate recovery after swap is complete, but async deletion // is not yet complete. Clean operation is resumed during recovery. 
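    // At this stage every segment already carries its final file name; the only work still outstanding
    // from the interrupted cleaning is the asynchronous deletion of the old .deleted files.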
log = recoverAndCheck(config, cleanedKeys) log.close() } @Test def testBuildOffsetMapFakeLarge(): Unit = { val map = new FakeOffsetMap(1000) val logProps = new Properties() logProps.put(LogConfig.SegmentBytesProp, 120: java.lang.Integer) logProps.put(LogConfig.SegmentIndexBytesProp, 120: java.lang.Integer) logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact) val logConfig = LogConfig(logProps) val log = makeLog(config = logConfig) val cleaner = makeCleaner(Int.MaxValue) val keyStart = 0 val keyEnd = 2 val offsetStart = 0L val offsetEnd = 7206178L val offsetSeq = Seq(offsetStart, offsetEnd) writeToLog(log, (keyStart until keyEnd) zip (keyStart until keyEnd), offsetSeq) cleaner.buildOffsetMap(log, keyStart, offsetEnd + 1L, map, new CleanerStats()) assertEquals("Last offset should be the end offset.", offsetEnd, map.latestOffset) assertEquals("Should have the expected number of messages in the map.", keyEnd - keyStart, map.size) assertEquals("Map should contain first value", 0L, map.get(key(0))) assertEquals("Map should contain second value", offsetEnd, map.get(key(1))) } /** * Test building a partial offset map of part of a log segment */ @Test def testBuildPartialOffsetMap(): Unit = { // because loadFactor is 0.75, this means we can fit 2 messages in the map val map = new FakeOffsetMap(3) val log = makeLog() val cleaner = makeCleaner(2) log.appendAsLeader(record(0,0), leaderEpoch = 0) log.appendAsLeader(record(1,1), leaderEpoch = 0) log.appendAsLeader(record(2,2), leaderEpoch = 0) log.appendAsLeader(record(3,3), leaderEpoch = 0) log.appendAsLeader(record(4,4), leaderEpoch = 0) log.roll() val stats = new CleanerStats() cleaner.buildOffsetMap(log, 2, Int.MaxValue, map, stats) assertEquals(2, map.size) assertEquals(-1, map.get(key(0))) assertEquals(2, map.get(key(2))) assertEquals(3, map.get(key(3))) assertEquals(-1, map.get(key(4))) assertEquals(4, stats.mapMessagesRead) } /** * This test verifies that messages corrupted by KAFKA-4298 are fixed by the cleaner */ @Test def testCleanCorruptMessageSet() { val codec = CompressionType.GZIP val logProps = new Properties() logProps.put(LogConfig.CompressionTypeProp, codec.name) val logConfig = LogConfig(logProps) val log = makeLog(config = logConfig) val cleaner = makeCleaner(10) // messages are constructed so that the payload matches the expecting offset to // make offset validation easier after cleaning // one compressed log entry with duplicates val dupSetKeys = (0 until 2) ++ (0 until 2) val dupSetOffset = 25 val dupSet = dupSetKeys zip (dupSetOffset until dupSetOffset + dupSetKeys.size) // and one without (should still be fixed by the cleaner) val noDupSetKeys = 3 until 5 val noDupSetOffset = 50 val noDupSet = noDupSetKeys zip (noDupSetOffset until noDupSetOffset + noDupSetKeys.size) log.appendAsFollower(invalidCleanedMessage(dupSetOffset, dupSet, codec)) log.appendAsFollower(invalidCleanedMessage(noDupSetOffset, noDupSet, codec)) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) for (segment <- log.logSegments; batch <- segment.log.batches.asScala; record <- batch.asScala) { assertTrue(record.hasMagic(batch.magic)) val value = TestUtils.readString(record.value).toLong assertEquals(record.offset, value) } } /** * Verify that the client can handle corrupted messages. Located here for now since the client * does not support writing messages with the old magic. 
*/ @Test def testClientHandlingOfCorruptMessageSet(): Unit = { import JavaConverters._ val keys = 1 until 10 val offset = 50 val set = keys zip (offset until offset + keys.size) val corruptedMessage = invalidCleanedMessage(offset, set) val records = MemoryRecords.readableRecords(corruptedMessage.buffer) for (logEntry <- records.records.asScala) { val offset = logEntry.offset val value = TestUtils.readString(logEntry.value).toLong assertEquals(offset, value) } } @Test def testCleanTombstone(): Unit = { val logConfig = LogConfig(new Properties()) val log = makeLog(config = logConfig) val cleaner = makeCleaner(10) // Append a message with a large timestamp. log.appendAsLeader(TestUtils.singletonRecords(value = "0".getBytes, key = "0".getBytes, timestamp = time.milliseconds() + logConfig.deleteRetentionMs + 10000), leaderEpoch = 0) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) // Append a tombstone with a small timestamp and roll out a new log segment. log.appendAsLeader(TestUtils.singletonRecords(value = null, key = "0".getBytes, timestamp = time.milliseconds() - logConfig.deleteRetentionMs - 10000), leaderEpoch = 0) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 1, log.activeSegment.baseOffset)) assertEquals("The tombstone should be retained.", 1, log.logSegments.head.log.batches.iterator.next().lastOffset) // Append a message and roll out another log segment. log.appendAsLeader(TestUtils.singletonRecords(value = "1".getBytes, key = "1".getBytes, timestamp = time.milliseconds()), leaderEpoch = 0) log.roll() cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals("The tombstone should be retained.", 1, log.logSegments.head.log.batches.iterator.next().lastOffset) } private def writeToLog(log: Log, keysAndValues: Iterable[(Int, Int)], offsetSeq: Iterable[Long]): Iterable[Long] = { for(((key, value), offset) <- keysAndValues.zip(offsetSeq)) yield log.appendAsFollower(messageWithOffset(key, value, offset)).lastOffset } private def invalidCleanedMessage(initialOffset: Long, keysAndValues: Iterable[(Int, Int)], codec: CompressionType = CompressionType.GZIP): MemoryRecords = { // this function replicates the old versions of the cleaner which under some circumstances // would write invalid compressed message sets with the outer magic set to 1 and the inner // magic set to 0 val records = keysAndValues.map(kv => LegacyRecord.create(RecordBatch.MAGIC_VALUE_V0, RecordBatch.NO_TIMESTAMP, kv._1.toString.getBytes, kv._2.toString.getBytes)) val buffer = ByteBuffer.allocate(math.min(math.max(records.map(_.sizeInBytes()).sum / 2, 1024), 1 << 16)) val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, codec, TimestampType.CREATE_TIME, initialOffset) var offset = initialOffset records.foreach { record => builder.appendUncheckedWithOffset(offset, record) offset += 1 } builder.build() } private def messageWithOffset(key: Array[Byte], value: Array[Byte], offset: Long): MemoryRecords = MemoryRecords.withRecords(offset, CompressionType.NONE, 0, new SimpleRecord(key, value)) private def messageWithOffset(key: Int, value: Int, offset: Long): MemoryRecords = messageWithOffset(key.toString.getBytes, value.toString.getBytes, offset) private def makeLog(dir: File = dir, config: LogConfig = logConfig, recoveryPoint: Long = 0L) = Log(dir = dir, config = config, logStartOffset = 0L, recoveryPoint = recoveryPoint, scheduler = time.scheduler, time = time, brokerTopicStats = 
new BrokerTopicStats, maxProducerIdExpirationMs = 60 * 60 * 1000, producerIdExpirationCheckIntervalMs = LogManager.ProducerIdExpirationCheckIntervalMs, logDirFailureChannel = new LogDirFailureChannel(10)) private def makeCleaner(capacity: Int, checkDone: TopicPartition => Unit = _ => (), maxMessageSize: Int = 64*1024) = new Cleaner(id = 0, offsetMap = new FakeOffsetMap(capacity), ioBufferSize = maxMessageSize, maxIoBufferSize = maxMessageSize, dupBufferLoadFactor = 0.75, throttler = throttler, time = time, checkDone = checkDone) private def writeToLog(log: Log, seq: Iterable[(Int, Int)]): Iterable[Long] = { for ((key, value) <- seq) yield log.appendAsLeader(record(key, value), leaderEpoch = 0).firstOffset.get } private def key(id: Long) = ByteBuffer.wrap(id.toString.getBytes) private def record(key: Int, value: Int, producerId: Long = RecordBatch.NO_PRODUCER_ID, producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH, sequence: Int = RecordBatch.NO_SEQUENCE, partitionLeaderEpoch: Int = RecordBatch.NO_PARTITION_LEADER_EPOCH): MemoryRecords = { MemoryRecords.withIdempotentRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, CompressionType.NONE, producerId, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord(key.toString.getBytes, value.toString.getBytes)) } private def appendTransactionalAsLeader(log: Log, producerId: Long, producerEpoch: Short): Seq[Int] => LogAppendInfo = { appendIdempotentAsLeader(log, producerId, producerEpoch, isTransactional = true) } private def appendIdempotentAsLeader(log: Log, producerId: Long, producerEpoch: Short, isTransactional: Boolean = false): Seq[Int] => LogAppendInfo = { var sequence = 0 keys: Seq[Int] => { val simpleRecords = keys.map { key => val keyBytes = key.toString.getBytes new SimpleRecord(time.milliseconds(), keyBytes, keyBytes) // the value doesn't matter since we validate offsets } val records = if (isTransactional) MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, producerEpoch, sequence, simpleRecords: _*) else MemoryRecords.withIdempotentRecords(CompressionType.NONE, producerId, producerEpoch, sequence, simpleRecords: _*) sequence += simpleRecords.size log.appendAsLeader(records, leaderEpoch = 0) } } private def commitMarker(producerId: Long, producerEpoch: Short, timestamp: Long = time.milliseconds()): MemoryRecords = endTxnMarker(producerId, producerEpoch, ControlRecordType.COMMIT, 0L, timestamp) private def abortMarker(producerId: Long, producerEpoch: Short, timestamp: Long = time.milliseconds()): MemoryRecords = endTxnMarker(producerId, producerEpoch, ControlRecordType.ABORT, 0L, timestamp) private def endTxnMarker(producerId: Long, producerEpoch: Short, controlRecordType: ControlRecordType, offset: Long, timestamp: Long): MemoryRecords = { val endTxnMarker = new EndTransactionMarker(controlRecordType, 0) MemoryRecords.withEndTransactionMarker(offset, timestamp, RecordBatch.NO_PARTITION_LEADER_EPOCH, producerId, producerEpoch, endTxnMarker) } private def record(key: Int, value: Array[Byte]): MemoryRecords = TestUtils.singletonRecords(key = key.toString.getBytes, value = value) private def unkeyedRecord(value: Int): MemoryRecords = TestUtils.singletonRecords(value = value.toString.getBytes) private def tombstoneRecord(key: Int): MemoryRecords = record(key, null) private def recoverAndCheck(config: LogConfig, expectedKeys: Iterable[Long]): Log = { LogTest.recoverAndCheck(dir, config, expectedKeys, new BrokerTopicStats(), time, time.scheduler) } } class FakeOffsetMap(val slots: Int) extends OffsetMap { val map = new 
java.util.HashMap[String, Long]() var lastOffset = -1L private def keyFor(key: ByteBuffer) = new String(Utils.readBytes(key.duplicate), "UTF-8") override def put(key: ByteBuffer, offset: Long): Unit = { lastOffset = offset map.put(keyFor(key), offset) } override def get(key: ByteBuffer): Long = { val k = keyFor(key) if(map.containsKey(k)) map.get(k) else -1L } override def clear(): Unit = map.clear() override def size: Int = map.size override def latestOffset: Long = lastOffset override def updateLatestOffset(offset: Long): Unit = { lastOffset = offset } override def toString: String = map.toString }
Esquive/kafka
core/src/test/scala/unit/kafka/log/LogCleanerTest.scala
Scala
apache-2.0
70,717
package org.http4s.internal import scala.collection.immutable import scala.collection.mutable import scala.collection.mutable.ListBuffer private[http4s] object CollectionCompat { type LazyList[A] = scala.collection.immutable.LazyList[A] val LazyList = scala.collection.immutable.LazyList def pairsToMultiParams[K, V](map: collection.Seq[(K, Option[V])]): Map[K, immutable.Seq[V]] = if (map.isEmpty) Map.empty else { val m = mutable.Map.empty[K, ListBuffer[V]] map.foreach { case (k, None) => m.getOrElseUpdate(k, new ListBuffer) case (k, Some(v)) => m.getOrElseUpdate(k, new ListBuffer) += v } m.view.mapValues(_.toList).toMap } def mapValues[K, A, B](map: Map[K, A])(f: A => B): Map[K, B] = map.view.mapValues(f).toMap val CollectionConverters = scala.jdk.CollectionConverters }
aeons/http4s
core/src/main/scala-2.13/org/http4s/internal/CollectionCompat.scala
Scala
apache-2.0
851
package models.generator import io.apibuilder.generator.v0.models.InvocationForm import io.apibuilder.spec.v0.models.Interface import models.TestHelper import org.scalatest.funspec.AnyFunSpec import org.scalatest.matchers.should.Matchers import scala.generator.{ScalaCaseClasses, ScalaService} class ScalaCaseClassesInterfacesSpec extends AnyFunSpec with Matchers with helpers.ServiceHelpers { private[this] def build(interfaces: Seq[Interface]): ScalaService = { ScalaService( makeService(interfaces = interfaces) ) } it("interfaceWithNoFields") { val ssd = build( Seq(makeInterface( name = "person", fields = Nil, )) ) models.TestHelper.assertEqualsFile( "/generators/ScalaCaseClassesInterfacesSpec.interfaceWithNoFields.json", ScalaCaseClasses.generateTrait(ssd.interfaces.head) ) } it("interfaceWithSingleField") { val ssd = build( Seq(makeInterface( name = "person", fields = Seq(makeField(name = "first")), )) ) models.TestHelper.assertEqualsFile( "/generators/ScalaCaseClassesInterfacesSpec.interfaceWithSingleField.json", ScalaCaseClasses.generateTrait(ssd.interfaces.head) ) } it("interfacesWithDeprecation") { val service = models.TestHelper.parseFile(s"/examples/interfaces.json") ScalaCaseClasses.invoke(InvocationForm(service = service)) match { case Left(errors) => fail(errors.mkString(", ")) case Right(sourceFiles) => { sourceFiles.size shouldBe 1 TestHelper.assertValidScalaSourceFiles(sourceFiles) models.TestHelper.assertEqualsFile( "/generators/scala-models-interfaces.txt", sourceFiles.head.contents ) } } } }
mbryzek/apidoc-generator
scala-generator/src/test/scala/models/generator/ScalaCaseClassesInterfacesSpec.scala
Scala
mit
1,767
package edu.gemini.itc.web.baseline import edu.gemini.itc.baseline._ import edu.gemini.itc.baseline.util._ import edu.gemini.itc.shared._ import edu.gemini.itc.web.baseline.Baseline._ import org.scalacheck.{Arbitrary, Gen} import org.specs2.ScalaCheck import org.specs2.mutable.Specification import org.specs2.scalacheck.ScalaCheckParameters /** * Spec which compares a limited number of random ITC "recipe" executions with the expected outcome. * Tests are executed by using a hash value generated from the fixture as a key in a map * that contains hash values of the expected output of the recipe execution (currently a string). This baseline * map is stored as a resource file and needs to be updated whenever there are changes to the code that change * the outputs. See [[BaselineTest]] for details. */ object BaselineAllSpec extends Specification with ScalaCheck with ScalaCheckParameters { // default number of tests is 100, which takes a bit too long private val minTestsCnt = 10 // === ACQUISITION CAMERA { implicit val arbFixture: Arbitrary[Fixture[AcquisitionCamParameters]] = Arbitrary { Gen.oneOf(BaselineAcqCam.Fixtures) } "Acquisition Camera calculations" should { "match latest baseline" ! prop { f: Fixture[AcquisitionCamParameters] => checkAgainstBaseline(Baseline.from(f, executeAcqCamRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === F2 { implicit val arbFixture: Arbitrary[Fixture[Flamingos2Parameters]] = Arbitrary { Gen.oneOf(BaselineF2.Fixtures) } "Flamingos2 calculations" should { "match latest baseline" ! prop { f: Fixture[Flamingos2Parameters] => checkAgainstBaseline(Baseline.from(f, executeF2Recipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === GMOS { implicit val arbFixture: Arbitrary[Fixture[GmosParameters]] = Arbitrary { Gen.oneOf(BaselineGmos.Fixtures) } "GMOS calculations" should { "match latest baseline" ! prop { f: Fixture[GmosParameters] => checkAgainstBaseline(Baseline.from(f, executeGmosRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === GNIRS { implicit val arbFixture: Arbitrary[Fixture[GnirsParameters]] = Arbitrary { Gen.oneOf(BaselineGnirs.Fixtures) } "GNIRS calculations" should { "match latest baseline" ! prop { f: Fixture[GnirsParameters] => checkAgainstBaseline(Baseline.from(f, executeGnirsRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === GSAOI { implicit val arbFixture: Arbitrary[Fixture[GsaoiParameters]] = Arbitrary { Gen.oneOf(BaselineGsaoi.Fixtures) } "GSAOI calculations" should { "match latest baseline" ! prop { f: Fixture[GsaoiParameters] => checkAgainstBaseline(Baseline.from(f, executeGsaoiRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === Michelle { implicit val arbFixture: Arbitrary[Fixture[MichelleParameters]] = Arbitrary { Gen.oneOf(BaselineMichelle.Fixtures) } "Michelle calculations" should { "match latest baseline" ! prop { f: Fixture[MichelleParameters] => checkAgainstBaseline(Baseline.from(f, executeMichelleRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === NIFS { implicit val arbFixture: Arbitrary[Fixture[NifsParameters]] = Arbitrary { Gen.oneOf(BaselineNifs.Fixtures) } "NIFS calculations" should { "match latest baseline" ! 
prop { f: Fixture[NifsParameters] => checkAgainstBaseline(Baseline.from(f, executeNifsRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === NIRI { implicit val arbFixture: Arbitrary[Fixture[NiriParameters]] = Arbitrary { Gen.oneOf(BaselineNiri.Fixtures) } "NIRI calculations" should { "match latest baseline" ! prop { f: Fixture[NiriParameters] => checkAgainstBaseline(Baseline.from(f, executeNiriRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } // === TRecs { implicit val arbFixture: Arbitrary[Fixture[TRecsParameters]] = Arbitrary { Gen.oneOf(BaselineTRecs.Fixtures) } "TRecs calculations" should { "match latest baseline" ! prop { f: Fixture[TRecsParameters] => checkAgainstBaseline(Baseline.from(f, executeTrecsRecipe(f))) }.set(defaultParameters.minTestsOk, minTestsCnt) } } }
spakzad/ocs
bundle/edu.gemini.itc.web/src/test/scala/edu/gemini/itc/web/baseline/BaselineAllSpec.scala
Scala
bsd-3-clause
4,535
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka import java.util.Properties import java.util.concurrent.atomic._ import kafka.log._ import kafka.utils._ import org.apache.kafka.clients.consumer.OffsetOutOfRangeException import org.apache.kafka.common.record.FileRecords import org.apache.kafka.common.utils.Utils /** * A stress test that instantiates a log and then runs continual appends against it from one thread and continual reads against it * from another thread and checks a few basic assertions until the user kills the process. */ object StressTestLog { val running = new AtomicBoolean(true) def main(args: Array[String]) { val dir = TestUtils.randomPartitionLogDir(TestUtils.tempDir()) val time = new MockTime val logProperties = new Properties() logProperties.put(LogConfig.SegmentBytesProp, 64*1024*1024: java.lang.Integer) logProperties.put(LogConfig.MaxMessageBytesProp, Int.MaxValue: java.lang.Integer) logProperties.put(LogConfig.SegmentIndexBytesProp, 1024*1024: java.lang.Integer) val log = new Log(dir = dir, config = LogConfig(logProperties), logStartOffset = 0L, recoveryPoint = 0L, scheduler = time.scheduler, time = time) val writer = new WriterThread(log) writer.start() val reader = new ReaderThread(log) reader.start() Runtime.getRuntime().addShutdownHook(new Thread() { override def run() = { running.set(false) writer.join() reader.join() Utils.delete(dir) } }) while(running.get) { println("Reader offset = %d, writer offset = %d".format(reader.offset, writer.offset)) Thread.sleep(1000) } } abstract class WorkerThread extends Thread { override def run() { try { while(running.get) work() } catch { case e: Exception => e.printStackTrace() running.set(false) } println(getClass.getName + " exiting...") } def work() } class WriterThread(val log: Log) extends WorkerThread { @volatile var offset = 0 override def work() { val logAppendInfo = log.appendAsFollower(TestUtils.singletonRecords(offset.toString.getBytes)) require(logAppendInfo.firstOffset == offset && logAppendInfo.lastOffset == offset) offset += 1 if(offset % 1000 == 0) Thread.sleep(500) } } class ReaderThread(val log: Log) extends WorkerThread { @volatile var offset = 0 override def work() { try { log.read(offset, 1024, Some(offset+1)).records match { case read: FileRecords if read.sizeInBytes > 0 => { val first = read.batches.iterator.next() require(first.lastOffset == offset, "We should either read nothing or the message we asked for.") require(first.sizeInBytes == read.sizeInBytes, "Expected %d but got %d.".format(first.sizeInBytes, read.sizeInBytes)) offset += 1 } case _ => } } catch { case _: OffsetOutOfRangeException => // this is okay } } } }
rhauch/kafka
core/src/test/scala/other/kafka/StressTestLog.scala
Scala
apache-2.0
3,939
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import org.apache.spark.{SharedSparkContext, SparkFunSuite} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{BooleanType, StringType, StructField, StructType} class SQLContextSuite extends SparkFunSuite with SharedSparkContext { object DummyRule extends Rule[LogicalPlan] { def apply(p: LogicalPlan): LogicalPlan = p } test("Sessions of SQLContext") { val sqlContext = SparkSession.builder().sparkContext(sc).getOrCreate().sqlContext val session1 = sqlContext.newSession() val session2 = sqlContext.newSession() // all have the default configurations val key = SQLConf.SHUFFLE_PARTITIONS.key assert(session1.getConf(key) === session2.getConf(key)) session1.setConf(key, "1") session2.setConf(key, "2") assert(session1.getConf(key) === "1") assert(session2.getConf(key) === "2") // temporary table should not be shared val df = session1.range(10) df.createOrReplaceTempView("test1") assert(session1.tableNames().contains("test1")) assert(!session2.tableNames().contains("test1")) // UDF should not be shared def myadd(a: Int, b: Int): Int = a + b session1.udf.register[Int, Int, Int]("myadd", myadd) session1.sql("select myadd(1, 2)").explain() intercept[AnalysisException] { session2.sql("select myadd(1, 2)").explain() } } test("Catalyst optimization passes are modifiable at runtime") { val sqlContext = SparkSession.builder().sparkContext(sc).getOrCreate().sqlContext sqlContext.experimental.extraOptimizations = Seq(DummyRule) assert(sqlContext.sessionState.optimizer.batches.flatMap(_.rules).contains(DummyRule)) } test("get all tables") { val sqlContext = SparkSession.builder().sparkContext(sc).getOrCreate().sqlContext val df = sqlContext.range(10) df.createOrReplaceTempView("listtablessuitetable") assert( sqlContext.tables().filter("tableName = 'listtablessuitetable'").collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) assert( sqlContext.sql("SHOW tables").filter("tableName = 'listtablessuitetable'").collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) sqlContext.sessionState.catalog.dropTable( TableIdentifier("listtablessuitetable"), ignoreIfNotExists = true, purge = false) assert(sqlContext.tables().filter("tableName = 'listtablessuitetable'").count() === 0) } test("getting all tables with a database name has no impact on returned table names") { val sqlContext = SparkSession.builder().sparkContext(sc).getOrCreate().sqlContext val df = sqlContext.range(10) df.createOrReplaceTempView("listtablessuitetable") assert( sqlContext.tables("default").filter("tableName = 
'listtablessuitetable'").collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) assert( sqlContext.sql("show TABLES in default").filter("tableName = 'listtablessuitetable'") .collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) sqlContext.sessionState.catalog.dropTable( TableIdentifier("listtablessuitetable"), ignoreIfNotExists = true, purge = false) assert(sqlContext.tables().filter("tableName = 'listtablessuitetable'").count() === 0) } test("query the returned DataFrame of tables") { val sqlContext = SparkSession.builder().sparkContext(sc).getOrCreate().sqlContext val df = sqlContext.range(10) df.createOrReplaceTempView("listtablessuitetable") val expectedSchema = StructType( StructField("database", StringType, false) :: StructField("tableName", StringType, false) :: StructField("isTemporary", BooleanType, false) :: Nil) Seq(sqlContext.tables(), sqlContext.sql("SHOW TABLes")).foreach { tableDF => assert(expectedSchema === tableDF.schema) tableDF.createOrReplaceTempView("tables") assert( sqlContext.sql( "SELECT isTemporary, tableName from tables WHERE tableName = 'listtablessuitetable'") .collect().toSeq == Row(true, "listtablessuitetable") :: Nil) assert( sqlContext.tables().filter("tableName = 'tables'").select("tableName", "isTemporary") .collect().toSeq == Row("tables", true) :: Nil) sqlContext.dropTempTable("tables") } } }
pgandhi999/spark
sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
Scala
apache-2.0
5,330
package com.github.mdr.mash.view.render.help import com.github.mdr.mash.functions.MashFunction import com.github.mdr.mash.view.render.{ CallingSyntaxRenderer, MashRenderer } import com.github.mdr.mash.screen.{ Line, StyledString } import com.github.mdr.mash.utils.LineInfo object FunctionHelpRenderer extends AbstractHelpRenderer { def render(f: MashFunction): LinesAndLinks = LinesAndLinks.combine(Seq( renderNameSection(f), LinesAndLinks(renderCallingSyntaxSection(f)), LinesAndLinks(ParameterHelpRenderer.renderSection(f.params.params)), renderDescriptionSection(f.descriptionOpt), LinesAndLinks(renderSourceSection(f.sourceOpt)))) private def renderNameSection(f: MashFunction): LinesAndLinks = { val names = (f.fullyQualifiedName +: f.aliases).map(_.toString) renderNameSection("FUNCTION", names, f.summaryOpt) } private def renderCallingSyntaxSection(f: MashFunction): Seq[Line] = if (f.params.isEmpty) Seq() else renderCallingSyntaxSection(CallingSyntaxRenderer.render(f)) def renderCallingSyntaxSection(callingSyntax: StyledString): Seq[Line] = Seq( Line.Empty, Line(SectionTitleStyle("CALLING SYNTAX")), Line(IndentSpace + callingSyntax)) def renderSourceSection(sourceOpt: Option[String]): Seq[Line] = sourceOpt.toSeq.flatMap(source ⇒ Seq(Line.Empty, Line(SectionTitleStyle("SOURCE"))) ++ renderSource(source)) private def renderSource(s: String): Seq[Line] = { val renderedSource = new MashRenderer().renderChars(s) new LineInfo(renderedSource.forgetStyling) .lineRegions .map(region ⇒ Line(IndentSpace + region.of(renderedSource.chars))) } }
mdr/mash
src/main/scala/com/github/mdr/mash/view/render/help/FunctionHelpRenderer.scala
Scala
mit
1,697
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler import org.apache.spark.ShuffleDependency import org.apache.spark.rdd.RDD import org.apache.spark.storage.BlockManagerId import org.apache.spark.util.CallSite /** * ShuffleMapStages are intermediate stages in the execution DAG that produce data for a shuffle. * They occur right before each shuffle operation, and might contain multiple pipelined operations * before that (e.g. map and filter). When executed, they save map output files that can later be * fetched by reduce tasks. The `shuffleDep` field describes the shuffle each stage is part of, * and variables like `outputLocs` and `numAvailableOutputs` track how many map outputs are ready. * * ShuffleMapStages can also be submitted independently as jobs with DAGScheduler.submitMapStage. * For such stages, the ActiveJobs that submitted them are tracked in `mapStageJobs`. Note that * there can be multiple ActiveJobs trying to compute the same shuffle map stage. */ private[spark] class ShuffleMapStage( id: Int, rdd: RDD[_], numTasks: Int, parents: List[Stage], firstJobId: Int, callSite: CallSite, val shuffleDep: ShuffleDependency[_, _, _]) extends Stage(id, rdd, numTasks, parents, firstJobId, callSite) { private[this] var _mapStageJobs: List[ActiveJob] = Nil private[this] var _numAvailableOutputs: Int = 0 /** Identify whether this stage is a shuffleMap stage. * Added by chenfei */ isShuffleMap = true /** * List of [[MapStatus]] for each partition. The index of the array is the map partition id, * and each value in the array is the list of possible [[MapStatus]] for a partition * (a single task might run multiple times). */ private[this] val outputLocs = Array.fill[List[MapStatus]](numPartitions)(Nil) override def toString: String = "ShuffleMapStage " + id /** * Returns the list of active jobs, * i.e. map-stage jobs that were submitted to execute this stage independently (if any). */ def mapStageJobs: Seq[ActiveJob] = _mapStageJobs /** Adds the job to the active job list. */ def addActiveJob(job: ActiveJob): Unit = { _mapStageJobs = job :: _mapStageJobs } /** Removes the job from the active job list. */ def removeActiveJob(job: ActiveJob): Unit = { _mapStageJobs = _mapStageJobs.filter(_ != job) } /** * Number of partitions that have shuffle outputs. * When this reaches [[numPartitions]], this map stage is ready. * This should be kept consistent as `outputLocs.filter(!_.isEmpty).size`. */ def numAvailableOutputs: Int = _numAvailableOutputs /** * Returns true if the map stage is ready, i.e. all partitions have shuffle outputs. * This should be the same as `outputLocs.contains(Nil)`. */ def isAvailable: Boolean = _numAvailableOutputs == numPartitions /** Returns the sequence of partition ids that are missing (i.e. needs to be computed). 
*/ override def findMissingPartitions(): Seq[Int] = { val missing = (0 until numPartitions).filter(id => outputLocs(id).isEmpty) assert(missing.size == numPartitions - _numAvailableOutputs, s"${missing.size} missing, expected ${numPartitions - _numAvailableOutputs}") missing } def addOutputLoc(partition: Int, status: MapStatus): Unit = { val prevList = outputLocs(partition) outputLocs(partition) = status :: prevList if (prevList == Nil) { _numAvailableOutputs += 1 } } def removeOutputLoc(partition: Int, bmAddress: BlockManagerId): Unit = { val prevList = outputLocs(partition) val newList = prevList.filterNot(_.location == bmAddress) outputLocs(partition) = newList if (prevList != Nil && newList == Nil) { _numAvailableOutputs -= 1 } } /** * Returns an array of [[MapStatus]] (index by partition id). For each partition, the returned * value contains only one (i.e. the first) [[MapStatus]]. If there is no entry for the partition, * that position is filled with null. */ def outputLocInMapOutputTrackerFormat(): Array[MapStatus] = { outputLocs.map(_.headOption.orNull) } /** * Removes all shuffle outputs associated with this executor. Note that this will also remove * outputs which are served by an external shuffle server (if one exists), as they are still * registered with this execId. */ def removeOutputsOnExecutor(execId: String): Unit = { var becameUnavailable = false for (partition <- 0 until numPartitions) { val prevList = outputLocs(partition) val newList = prevList.filterNot(_.location.executorId == execId) outputLocs(partition) = newList if (prevList != Nil && newList == Nil) { becameUnavailable = true _numAvailableOutputs -= 1 } } if (becameUnavailable) { logInfo("%s is now unavailable on executor %s (%d/%d, %s)".format( this, execId, _numAvailableOutputs, numPartitions, isAvailable)) } } }
u2009cf/spark-radar
core/src/main/scala/org/apache/spark/scheduler/ShuffleMapStage.scala
Scala
apache-2.0
5,741
/* * Copyright 2015 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.releaser package object domain { type CommitSha = String // type Repo = String type ArtefactName = String type Url = String case class Repo(value:String) extends AnyVal }
xnejp03/releaser
src/main/scala/uk/gov/hmrc/releaser/domain/package.scala
Scala
apache-2.0
808
/* * Scala (https://www.scala-lang.org) * * Copyright EPFL and Lightbend, Inc. * * Licensed under Apache License 2.0 * (http://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package scala package reflect package api /** * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span> * * A trait that defines types and operations on them. * * Type instances represent information about the type of a corresponding symbol. This includes its members * (methods, fields, type parameters, nested classes, traits, etc.) either declared directly or inherited, its base types, * its erasure and so on. Types also provide operations to test for type conformance or equivalence or for widening. * * To instantiate a type, most of the time, the [[scala.reflect.api.TypeTags#typeOf]] method can be used. It takes * a type argument and produces a `Type` instance which represents that argument. For example: * * {{{ * scala> typeOf[List[Int]] * res0: reflect.runtime.universe.Type = scala.List[Int] * }}} * * In this example, a [[scala.reflect.api.Types#TypeRef]] is returned, which corresponds to the type constructor `List` * applied to the type argument `Int`. * * In the case of a generic type, you can also combine it with other types * using [[scala.reflect.api.Types#appliedType]]. For example: * * {{{ * scala> val intType = typeOf[Int] * intType: reflect.runtime.universe.Type = Int * * scala> val listType = typeOf[List[_]] * listType: reflect.runtime.universe.Type = List[_] * * scala> appliedType(listType.typeConstructor, intType) * res0: reflect.runtime.universe.Type = List[Int] * }}} * * ''Note:'' Method `typeOf` does not work for types with type parameters, such as `typeOf[List[A]]` where `A` is * a type parameter. In this case, use [[scala.reflect.api.TypeTags#weakTypeOf]] instead. * * For other ways to instantiate types, see the [[https://docs.scala-lang.org/overviews/reflection/symbols-trees-types.html corresponding section of the Reflection Guide]]. * * === Common Operations on Types === * * Types are typically used for type conformance tests or are queried for declarations of members or inner types. * * - '''Subtyping Relationships''' can be tested using `<:<` and `weak_<:<`. * - '''Type Equality''' can be checked with `=:=`. It's important to note that `==` should not be used to compare types for equality-- `==` can't check for type equality in the presence of type aliases, while `=:=` can. * * Types can be queried for members and declarations by using the `members` and `declarations` methods (along with * their singular counterparts `member` and `declaration`), which provide the list of definitions associated with that type. * For example, to look up the `map` method of `List`, one can do: * * {{{ * scala> typeOf[List[_]].member(TermName("map")) * res1: reflect.runtime.universe.Symbol = method map * }}} * * For more information about `Type`s, see the [[https://docs.scala-lang.org/overviews/reflection/symbols-trees-types.html Reflection Guide: Symbols, Trees, and Types]] * * @groupname TypeCreators Types - Creation * @groupname TypeOps Types - Operations * @group ReflectionAPI * @contentDiagram hideNodes "*Api" */ trait Types { self: Universe => /** The type of Scala types, and also Scala type signatures. * (No difference is internally made between the two). 
* @template * @group Types */ type Type >: Null <: AnyRef with TypeApi /** This constant is used as a special value that indicates that no meaningful type exists. * @group Types */ val NoType: Type /** This constant is used as a special value denoting the empty prefix in a path dependent type. * For instance `x.type` is represented as `SingleType(NoPrefix, <x>)`, where `<x>` stands for * the symbol for `x`. * @group Types */ val NoPrefix: Type /** The API of types. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API * * @define dealiasWidenWarning Note that type aliases can hide beneath * singleton types and singleton types can hide inside type aliases. * Moreover, aliases might lurk in the upper bounds of abstract types. * Therefore careful thought has to be applied to identify and carry out * unwrapping logic specific to your use case. */ abstract class TypeApi { /** The term symbol associated with the type, or `NoSymbol` for types * that do not refer to a term symbol. */ def termSymbol: Symbol /** The type symbol associated with the type, or `NoSymbol` for types * that do not refer to a type symbol. */ def typeSymbol: Symbol /** @see [[decl]] */ @deprecated("use `decl` instead", "2.11.0") def declaration(name: Name): Symbol /** The defined or declared members with name `name` in this type; * an OverloadedSymbol if several exist, NoSymbol if none exist. * Alternatives of overloaded symbol appear in the order they are declared. */ def decl(name: Name): Symbol /** @see [[decls]] */ @deprecated("use `decls` instead", "2.11.0") def declarations: MemberScope /** A `Scope` containing directly declared members of this type. * Unlike `members` this method doesn't returns inherited members. * * Members in the returned scope might appear in arbitrary order. * Use `decls.sorted` to get an ordered list of members. */ def decls: MemberScope /** The member with given name, either directly declared or inherited, * an OverloadedSymbol if several exist, NoSymbol if none exist. */ def member(name: Name): Symbol /** A `Scope` containing all members of this type (directly declared or inherited). * Unlike `declarations` this method also returns inherited members. * * Members in the returned scope might appear in arbitrary order. * Use `members.sorted` to get an ordered list of members. */ def members: MemberScope /** Type signature of the companion of the underlying class symbol. * NoType if the underlying symbol is not a class symbol, or if it doesn't have a companion. */ def companion: Type /** Is this type a type constructor that is missing its type arguments? */ def takesTypeArgs: Boolean /** Returns the corresponding type constructor (e.g. List for List[T] or List[String]) */ def typeConstructor: Type /** Reduce to beta eta-long normal form. * Expands type aliases and converts higher-kinded TypeRefs to PolyTypes. * Functions on types are also implemented as PolyTypes. * * Example: (in the below, <List> is the type constructor of List) * TypeRef(pre, <List>, List()) is replaced by * PolyType(X, TypeRef(pre, <List>, List(X))) */ @deprecated("use `dealias` or `etaExpand` instead", "2.11.0") def normalize: Type /** Converts higher-kinded TypeRefs to PolyTypes. * Functions on types are also implemented as PolyTypes. * * Example: (in the below, <List> is the type constructor of List) * TypeRef(pre, <List>, List()) is replaced by * PolyType(X, TypeRef(pre, <List>, List(X))) */ def etaExpand: Type /** Does this type conform to given type argument `that`? 
*/ def <:< (that: Type): Boolean /** Does this type weakly conform to given type argument `that`, i.e., either conforms in terms of `<:<` or both are primitive number types * that conform according to Section "Weak Conformance" in the spec. For example, Int weak_<:< Long. */ def weak_<:<(that: Type): Boolean /** Is this type equivalent to given type argument `that`? */ def =:= (that: Type): Boolean /** The list of all base classes of this type (including its own typeSymbol) * in linearization order, starting with the class itself and ending * in class Any. */ def baseClasses: List[Symbol] /** The least type instance of given class which is a super-type * of this type. Example: * {{{ * class D[T] * class C extends p.D[Int] * ThisType(C).baseType(D) = p.D[Int] * }}} */ def baseType(clazz: Symbol): Type /** This type as seen from prefix `pre` and class `clazz`. This means: * Replace all `ThisType`s of `clazz` or one of its subclasses * by `pre` and instantiate all parameters by arguments of `pre`. * Proceed analogously for `ThisType`s referring to outer classes. * * Example: * {{{ * scala> import scala.reflect.runtime.universe._ * import scala.reflect.runtime.universe._ * * scala> class D[T] { def m: T = ??? } * defined class D * * scala> class C extends D[Int] * defined class C * * scala> val D = typeOf[D[_]].typeSymbol.asClass * D: reflect.runtime.universe.ClassSymbol = class D * * scala> val C = typeOf[C].typeSymbol.asClass * C: reflect.runtime.universe.ClassSymbol = class C * * scala> val T = D.typeParams(0).asType.toType * T: reflect.runtime.universe.Type = T * * scala> T.asSeenFrom(ThisType(C), D) * res0: reflect.runtime.universe.Type = scala.Int * }}} */ def asSeenFrom(pre: Type, clazz: Symbol): Type /** The erased type corresponding to this type after * all transformations from Scala to Java have been performed. */ def erasure: Type /** If this is a singleton type, widen it to its nearest underlying non-singleton * base type by applying one or more `underlying` dereferences. * If this is not a singleton type, returns this type itself. * * Example: * * class Outer { class C ; val x: C } * val o: Outer * <o.x.type>.widen = o.C * * $dealiasWidenWarning */ def widen: Type /** Expands type aliases arising from type members. * $dealiasWidenWarning */ def dealias: Type /******* popular methods from subclasses *******/ /** List of type arguments ingrained in this type reference. * Depending on your use case you might or might not want to call `dealias` first. * * {{{ * scala> type T = List[Int] * defined type alias T * * scala> typeOf[T].typeArgs * res0: List[reflect.runtime.universe.Type] = List() * * scala> typeOf[T].dealias.typeArgs * res1: List[reflect.runtime.universe.Type] = List(scala.Int) * }}} */ def typeArgs: List[Type] /** @see [[paramLists]] */ @deprecated("use `paramLists` instead", "2.11.0") def paramss: List[List[Symbol]] /** For a method or poly type, a list of its value parameter sections, * the empty list of lists for all other types. */ def paramLists: List[List[Symbol]] /** For a poly type, its type parameters, * the empty list for all other types. */ def typeParams: List[Symbol] /** For a (nullary) method or poly type, its direct result type * (can be a MethodType if the method has multiple argument lists), * the type itself for all other types. * * {{{ * scala> class C { def foo[T](x: T)(y: T) = ??? 
} * class C * * scala> typeOf[C].member(TermName("foo")).asMethod * res0: reflect.runtime.universe.MethodSymbol = method foo * * scala> res0.info // PolyType wrapping a MethodType * res1: reflect.runtime.universe.Type = [T](x: T)(y: T)scala.Nothing * * scala> res1.resultType // MethodType wrapping a MethodType * res2: reflect.runtime.universe.Type = (x: T)(y: T)scala.Nothing * * scala> res1.resultType.resultType // vanilla MethodType * res3: reflect.runtime.universe.Type = (y: T)scala.Nothing * * scala> res1.resultType.resultType.resultType * res4: reflect.runtime.universe.Type = scala.Nothing * * scala> res1.finalResultType * res5: reflect.runtime.universe.Type = scala.Nothing * }}} * * @see finalResultType */ def resultType: Type /** For a curried/nullary method or poly type its non-method result type, * the type itself for all other types. * * {{{ * scala> class C { * | def foo[T](x: T)(y: T) = ??? * | def bar: Int = ??? * | } * class C * * scala> typeOf[C].member(TermName("foo")).asMethod * res0: reflect.runtime.universe.MethodSymbol = method foo * * scala> res0.info // PolyType wrapping a MethodType * res1: reflect.runtime.universe.Type = [T](x: T)(y: T)scala.Nothing * * scala> res1.resultType // MethodType wrapping a MethodType * res2: reflect.runtime.universe.Type = (x: T)(y: T)scala.Nothing * * scala> res1.resultType.resultType // vanilla MethodType * res3: reflect.runtime.universe.Type = (y: T)scala.Nothing * * scala> res1.resultType.resultType.resultType * res4: reflect.runtime.universe.Type = scala.Nothing * * scala> res1.finalResultType * res5: reflect.runtime.universe.Type = scala.Nothing * * scala> typeOf[C].member(TermName("bar")).asMethod * res6: reflect.runtime.universe.MethodSymbol = method bar * * scala> res6.info // vanilla NullaryMethodType * res7: reflect.runtime.universe.Type = => scala.Int * * scala> res6.info.resultType * res8: reflect.runtime.universe.Type = scala.Int * * scala> res6.info.finalResultType * res9: reflect.runtime.universe.Type = scala.Int * }}} * * @see resultType */ def finalResultType: Type /******************* helpers *******************/ /** Provides an alternate if type is NoType. * * @group Helpers */ def orElse(alt: => Type): Type /** Substitute symbols in `to` for corresponding occurrences of references to * symbols `from` in this type. */ def substituteSymbols(from: List[Symbol], to: List[Symbol]): Type /** Substitute types in `to` for corresponding occurrences of references to * symbols `from` in this type. */ def substituteTypes(from: List[Symbol], to: List[Type]): Type /** Apply `f` to each part of this type, returning * a new type. children get mapped before their parents */ def map(f: Type => Type): Type /** Apply `f` to each part of this type, for side effects only */ def foreach(f: Type => Unit): Unit /** Returns optionally first type (in a preorder traversal) which satisfies predicate `p`, * or None if none exists. */ def find(p: Type => Boolean): Option[Type] /** Is there part of this type which satisfies predicate `p`? */ def exists(p: Type => Boolean): Boolean /** Does this type contain a reference to given symbol? */ def contains(sym: Symbol): Boolean } /** The type of Scala singleton types, i.e., types that are inhabited * by only one nun-null value. These include types of the forms * {{{ * C.this.type * C.super.type * x.type * }}} * as well as [[ConstantType constant types]]. * @template * @group Types */ type SingletonType >: Null <: SingletonTypeApi with Type /** Has no special methods. 
Is here to provides erased identity for `SingletonType`. * @group API */ trait SingletonTypeApi /** A singleton type that describes types of the form on the left with the * corresponding `ThisType` representation to the right: * {{{ * C.this.type ThisType(C) * }}} * @template * @group Types */ type ThisType >: Null <: ThisTypeApi with SingletonType /** The constructor/extractor for `ThisType` instances. * @group Extractors */ val ThisType: ThisTypeExtractor /** An extractor class to create and pattern match with syntax `ThisType(sym)` * where `sym` is the class prefix of the this type. * @group Extractors */ abstract class ThisTypeExtractor { def unapply(tpe: ThisType): Option[Symbol] /** @see [[Internals.InternalApi.thisType]] */ @deprecated("use `internal.thisType` instead", "2.11.0") def apply(sym: Symbol)(implicit token: CompatToken): Type = internal.thisType(sym) } /** The API that all this types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait ThisTypeApi extends TypeApi { this: ThisType => /** The underlying class symbol. */ def sym: Symbol } /** The `SingleType` type describes types of any of the forms on the left, * with their TypeRef representations to the right. * {{{ * (T # x).type SingleType(T, x) * p.x.type SingleType(p.type, x) * x.type SingleType(NoPrefix, x) * }}} * @template * @group Types */ type SingleType >: Null <: SingleTypeApi with SingletonType /** The constructor/extractor for `SingleType` instances. * @group Extractors */ val SingleType: SingleTypeExtractor /** An extractor class to create and pattern match with syntax `SingleType(pre, sym)` * Here, `pre` is the prefix of the single-type, and `sym` is the stable value symbol * referred to by the single-type. * @group Extractors */ abstract class SingleTypeExtractor { def unapply(tpe: SingleType): Option[(Type, Symbol)] /** @see [[Internals.InternalApi.singleType]] */ @deprecated("use `ClassSymbol.thisPrefix` or `internal.singleType` instead", "2.11.0") def apply(pre: Type, sym: Symbol)(implicit token: CompatToken): Type = internal.singleType(pre, sym) } /** The API that all single types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait SingleTypeApi extends TypeApi { this: SingleType => /** The type of the qualifier. */ def pre: Type /** The underlying symbol. */ def sym: Symbol } /** The `SuperType` type is not directly written, but arises when `C.super` is used * as a prefix in a `TypeRef` or `SingleType`. Its internal presentation is * {{{ * SuperType(thistpe, supertpe) * }}} * Here, `thistpe` is the type of the corresponding this-type. For instance, * in the type arising from C.super, the `thistpe` part would be `ThisType(C)`. * `supertpe` is the type of the super class referred to by the `super`. * @template * @group Types */ type SuperType >: Null <: SuperTypeApi with SingletonType /** The constructor/extractor for `SuperType` instances. 
* @group Extractors */ val SuperType: SuperTypeExtractor /** An extractor class to create and pattern match with syntax `SuperType(thistpe, supertpe)` * @group Extractors */ abstract class SuperTypeExtractor { def unapply(tpe: SuperType): Option[(Type, Type)] /** @see [[Internals.InternalApi.superType]] */ @deprecated("use `ClassSymbol.superPrefix` or `internal.superType` instead", "2.11.0") def apply(thistpe: Type, supertpe: Type)(implicit token: CompatToken): Type = internal.superType(thistpe, supertpe) } /** The API that all super types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait SuperTypeApi extends TypeApi { this: SuperType => /** The type of the qualifier. * See the example for [[scala.reflect.api.Trees#SuperExtractor]]. */ def thistpe: Type /** The type of the selector. * See the example for [[scala.reflect.api.Trees#SuperExtractor]]. */ def supertpe: Type } /** A `ConstantType` type cannot be expressed in user programs; it is inferred as the type of a constant. * Here are some constants with their types and the internal string representation: * {{{ * 1 ConstantType(Constant(1)) Int(1) * "abc" ConstantType(Constant("abc")) String("abc") * }}} * * ConstantTypes denote values that may safely be constant folded during type checking. * The `deconst` operation returns the equivalent type that will not be constant folded. * * @template * @group Types */ type ConstantType >: Null <: ConstantTypeApi with SingletonType /** The constructor/extractor for `ConstantType` instances. * @group Extractors */ val ConstantType: ConstantTypeExtractor /** An extractor class to create and pattern match with syntax `ConstantType(constant)` * Here, `constant` is the constant value represented by the type. * @group Extractors */ abstract class ConstantTypeExtractor { def unapply(tpe: ConstantType): Option[Constant] /** @see [[Internals.InternalApi.constantType]] */ @deprecated("use `value.tpe` or `internal.constantType` instead", "2.11.0") def apply(value: Constant)(implicit token: CompatToken): ConstantType = internal.constantType(value) } /** The API that all constant types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait ConstantTypeApi extends TypeApi { this: ConstantType => /** The compile-time constant underlying this type. */ def value: Constant } /** The `TypeRef` type describes types of any of the forms on the left, * with their TypeRef representations to the right. * {{{ * T # C[T_1, ..., T_n] TypeRef(T, C, List(T_1, ..., T_n)) * p.C[T_1, ..., T_n] TypeRef(p.type, C, List(T_1, ..., T_n)) * C[T_1, ..., T_n] TypeRef(NoPrefix, C, List(T_1, ..., T_n)) * T # C TypeRef(T, C, Nil) * p.C TypeRef(p.type, C, Nil) * C TypeRef(NoPrefix, C, Nil) * }}} * @template * @group Types */ type TypeRef >: Null <: TypeRefApi with Type /** The constructor/extractor for `TypeRef` instances. * @group Extractors */ val TypeRef: TypeRefExtractor /** An extractor class to create and pattern match with syntax `TypeRef(pre, sym, args)` * Here, `pre` is the prefix of the type reference, `sym` is the symbol * referred to by the type reference, and `args` is a possible empty list of * type arguments. 
* @group Extractors */ abstract class TypeRefExtractor { def unapply(tpe: TypeRef): Option[(Type, Symbol, List[Type])] /** @see [[Internals.InternalApi.typeRef]] */ @deprecated("use `internal.typeRef` instead", "2.11.0") def apply(pre: Type, sym: Symbol, args: List[Type])(implicit token: CompatToken): Type = internal.typeRef(pre, sym, args) } /** The API that all type refs support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait TypeRefApi extends TypeApi { this: TypeRef => /** The prefix of the type reference. * Is equal to `NoPrefix` if the prefix is not applicable. */ def pre: Type /** The underlying symbol of the type reference. */ def sym: Symbol /** The arguments of the type reference. * Is equal to `Nil` if the arguments are not provided. */ def args: List[Type] } /** A subtype of Type representing refined types as well as `ClassInfo` signatures. * @template * @group Types */ type CompoundType >: Null <: CompoundTypeApi with Type /** Has no special methods. Is here to provides erased identity for `CompoundType`. * @group API */ trait CompoundTypeApi /** The `RefinedType` type defines types of any of the forms on the left, * with their RefinedType representations to the right. * {{{ * P_1 with ... with P_m { D_1; ...; D_n} RefinedType(List(P_1, ..., P_m), Scope(D_1, ..., D_n)) * P_1 with ... with P_m RefinedType(List(P_1, ..., P_m), Scope()) * { D_1; ...; D_n} RefinedType(List(AnyRef), Scope(D_1, ..., D_n)) * }}} * @template * @group Types */ type RefinedType >: Null <: RefinedTypeApi with CompoundType /** The constructor/extractor for `RefinedType` instances. * @group Extractors */ val RefinedType: RefinedTypeExtractor /** An extractor class to create and pattern match with syntax `RefinedType(parents, decls)` * Here, `parents` is the list of parent types of the class, and `decls` is the scope * containing all declarations in the class. * @group Extractors */ abstract class RefinedTypeExtractor { def unapply(tpe: RefinedType): Option[(List[Type], Scope)] /** @see [[Internals.InternalApi.refinedType]] */ @deprecated("use `internal.refinedType` instead", "2.11.0") def apply(parents: List[Type], decls: Scope)(implicit token: CompatToken): RefinedType = internal.refinedType(parents, decls) /** @see [[Internals.InternalApi.refinedType]] */ @deprecated("use `internal.refinedType` instead", "2.11.0") def apply(parents: List[Type], decls: Scope, clazz: Symbol)(implicit token: CompatToken): RefinedType = internal.refinedType(parents, decls, clazz) } /** The API that all refined types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait RefinedTypeApi extends TypeApi { this: RefinedType => /** The superclasses of the type. */ def parents: List[Type] /** The scope that holds the definitions comprising the type. */ def decls: MemberScope } /** The `ClassInfo` type signature is used to define parents and declarations * of classes, traits, and objects. If a class, trait, or object C is declared like this * {{{ * C extends P_1 with ... with P_m { D_1; ...; D_n} * }}} * its `ClassInfo` type has the following form: * {{{ * ClassInfo(List(P_1, ..., P_m), Scope(D_1, ..., D_n), C) * }}} * @template * @group Types */ type ClassInfoType >: Null <: ClassInfoTypeApi with CompoundType /** The constructor/extractor for `ClassInfoType` instances. 
* @group Extractors */ val ClassInfoType: ClassInfoTypeExtractor /** An extractor class to create and pattern match with syntax `ClassInfo(parents, decls, clazz)` * Here, `parents` is the list of parent types of the class, `decls` is the scope * containing all declarations in the class, and `clazz` is the symbol of the class * itself. * @group Extractors */ abstract class ClassInfoTypeExtractor { def unapply(tpe: ClassInfoType): Option[(List[Type], Scope, Symbol)] /** @see [[Internals.InternalApi.classInfoType]] */ @deprecated("use `internal.classInfoType` instead", "2.11.0") def apply(parents: List[Type], decls: Scope, typeSymbol: Symbol)(implicit token: CompatToken): ClassInfoType = internal.classInfoType(parents, decls, typeSymbol) } /** The API that all class info types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait ClassInfoTypeApi extends TypeApi { this: ClassInfoType => /** The superclasses of the class type. */ def parents: List[Type] /** The scope that holds the definitions comprising the class type. */ def decls: MemberScope /** The symbol underlying the class type. */ def typeSymbol: Symbol } /** The `MethodType` type signature is used to indicate parameters and result type of a method * @template * @group Types */ type MethodType >: Null <: MethodTypeApi with Type /** The constructor/extractor for `MethodType` instances. * @group Extractors */ val MethodType: MethodTypeExtractor /** An extractor class to create and pattern match with syntax `MethodType(params, restpe)` * Here, `params` is a potentially empty list of parameter symbols of the method, * and `restpe` is the result type of the method. If the method is curried, `restpe` would * be another `MethodType`. * Note: `MethodType(Nil, Int)` would be the type of a method defined with an empty parameter list. * {{{ * def f(): Int * }}} * If the method is completely parameterless, as in * {{{ * def f: Int * }}} * its type is a `NullaryMethodType`. * @group Extractors */ abstract class MethodTypeExtractor { def unapply(tpe: MethodType): Option[(List[Symbol], Type)] /** @see [[Internals.InternalApi.methodType]] */ @deprecated("use `internal.methodType` instead", "2.11.0") def apply(params: List[Symbol], resultType: Type)(implicit token: CompatToken): MethodType = internal.methodType(params, resultType) } /** The API that all method types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait MethodTypeApi extends TypeApi { this: MethodType => /** The symbols that correspond to the parameters of the method. */ def params: List[Symbol] /** The result type of the method. */ def resultType: Type } /** The `NullaryMethodType` type signature is used for parameterless methods * with declarations of the form `def foo: T` * @template * @group Types */ type NullaryMethodType >: Null <: NullaryMethodTypeApi with Type /** The constructor/extractor for `NullaryMethodType` instances. * @group Extractors */ val NullaryMethodType: NullaryMethodTypeExtractor /** An extractor class to create and pattern match with syntax `NullaryMethodType(resultType)`. * Here, `resultType` is the result type of the parameterless method. 
* @group Extractors */ abstract class NullaryMethodTypeExtractor { def unapply(tpe: NullaryMethodType): Option[(Type)] /** @see [[Internals.InternalApi.nullaryMethodType]] */ @deprecated("use `internal.nullaryMethodType` instead", "2.11.0") def apply(resultType: Type)(implicit token: CompatToken): NullaryMethodType = internal.nullaryMethodType(resultType) } /** The API that all nullary method types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait NullaryMethodTypeApi extends TypeApi { this: NullaryMethodType => /** The result type of the method. */ def resultType: Type } /** The `PolyType` type signature is used for polymorphic methods * that have at least one type parameter. * @template * @group Types */ type PolyType >: Null <: PolyTypeApi with Type /** The constructor/extractor for `PolyType` instances. * @group Extractors */ val PolyType: PolyTypeExtractor /** An extractor class to create and pattern match with syntax `PolyType(typeParams, resultType)`. * Here, `typeParams` are the type parameters of the method and `resultType` * is the type signature following the type parameters. * @group Extractors */ abstract class PolyTypeExtractor { def unapply(tpe: PolyType): Option[(List[Symbol], Type)] /** @see [[Internals.InternalApi.polyType]] */ @deprecated("use `internal.polyType` instead", "2.11.0") def apply(typeParams: List[Symbol], resultType: Type)(implicit token: CompatToken): PolyType = internal.polyType(typeParams, resultType) } /** The API that all polymorphic types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait PolyTypeApi extends TypeApi { this: PolyType => /** The symbols corresponding to the type parameters. */ def typeParams: List[Symbol] /** The underlying type. */ def resultType: Type } /** The `ExistentialType` type signature is used for existential types and * wildcard types. * @template * @group Types */ type ExistentialType >: Null <: ExistentialTypeApi with Type /** The constructor/extractor for `ExistentialType` instances. * @group Extractors */ val ExistentialType: ExistentialTypeExtractor /** An extractor class to create and pattern match with syntax * `ExistentialType(quantified, underlying)`. * Here, `quantified` are the type variables bound by the existential type and `underlying` * is the type that's existentially quantified. * @group Extractors */ abstract class ExistentialTypeExtractor { def unapply(tpe: ExistentialType): Option[(List[Symbol], Type)] /** @see [[Internals.InternalApi.existentialType]] */ @deprecated("use `internal.existentialType` instead", "2.11.0") def apply(quantified: List[Symbol], underlying: Type)(implicit token: CompatToken): ExistentialType = internal.existentialType(quantified, underlying) } /** The API that all existential types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait ExistentialTypeApi extends TypeApi { this: ExistentialType => /** The symbols corresponding to the `forSome` clauses of the existential type. */ def quantified: List[Symbol] /** The underlying type of the existential type. */ def underlying: Type } /** The `AnnotatedType` type signature is used for annotated types of the * for `<type> @<annotation>`. * @template * @group Types */ type AnnotatedType >: Null <: AnnotatedTypeApi with Type /** The constructor/extractor for `AnnotatedType` instances. 
* @group Extractors */ val AnnotatedType: AnnotatedTypeExtractor /** An extractor class to create and pattern match with syntax * `AnnotatedType(annotations, underlying)`. * Here, `annotations` are the annotations decorating the underlying type `underlying`. * `selfSym` is a symbol representing the annotated type itself. * @group Extractors */ abstract class AnnotatedTypeExtractor { def unapply(tpe: AnnotatedType): Option[(List[Annotation], Type)] /** @see [[Internals.InternalApi.annotatedType]] */ @deprecated("use `internal.annotatedType` instead", "2.11.0") def apply(annotations: List[Annotation], underlying: Type)(implicit token: CompatToken): AnnotatedType = internal.annotatedType(annotations, underlying) } /** The API that all annotated types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait AnnotatedTypeApi extends TypeApi { this: AnnotatedType => /** The annotations. */ def annotations: List[Annotation] /** The annotee. */ def underlying: Type } /** The `TypeBounds` type signature is used to indicate lower and upper type bounds * of type parameters and abstract types. It is not a first-class type. * If an abstract type or type parameter is declared with any of the forms * on the left, its type signature is the TypeBounds type on the right. * {{{ * T >: L <: U TypeBounds(L, U) * T >: L TypeBounds(L, Any) * T <: U TypeBounds(Nothing, U) * }}} * @template * @group Types */ type TypeBounds >: Null <: TypeBoundsApi with Type /** The constructor/extractor for `TypeBounds` instances. * @group Extractors */ val TypeBounds: TypeBoundsExtractor /** An extractor class to create and pattern match with syntax `TypeBound(lower, upper)` * Here, `lower` is the lower bound of the `TypeBounds` pair, and `upper` is * the upper bound. * @group Extractors */ abstract class TypeBoundsExtractor { def unapply(tpe: TypeBounds): Option[(Type, Type)] /** @see [[Internals.InternalApi.typeBounds]] */ @deprecated("use `internal.typeBounds` instead", "2.11.0") def apply(lo: Type, hi: Type)(implicit token: CompatToken): TypeBounds = internal.typeBounds(lo, hi) } /** The API that all type bounds support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait TypeBoundsApi extends TypeApi { this: TypeBounds => /** The lower bound. * Is equal to `definitions.NothingTpe` if not specified explicitly. */ def lo: Type /** The upper bound. * Is equal to `definitions.AnyTpe` if not specified explicitly. */ def hi: Type } /** An object representing an unknown type, used during type inference. * If you see WildcardType outside of inference it is almost certainly a bug. * @group Types */ val WildcardType: Type /** BoundedWildcardTypes, used only during type inference, are created in * two places: * * 1. If the expected type of an expression is an existential type, * its hidden symbols are replaced with bounded wildcards. * 2. When an implicit conversion is being sought based in part on * the name of a method in the converted type, a HasMethodMatching * type is created: a MethodType with parameters typed as * BoundedWildcardTypes. * @template * @group Types */ type BoundedWildcardType >: Null <: BoundedWildcardTypeApi with Type /** The constructor/extractor for `BoundedWildcardType` instances. 
* @group Extractors */ val BoundedWildcardType: BoundedWildcardTypeExtractor /** An extractor class to create and pattern match with syntax `BoundedWildcardTypeExtractor(bounds)` * with `bounds` denoting the type bounds. * @group Extractors */ abstract class BoundedWildcardTypeExtractor { def unapply(tpe: BoundedWildcardType): Option[TypeBounds] /** @see [[Internals.InternalApi.boundedWildcardType]] */ @deprecated("use `internal.boundedWildcardType` instead", "2.11.0") def apply(bounds: TypeBounds)(implicit token: CompatToken): BoundedWildcardType = internal.boundedWildcardType(bounds) } /** The API that all this types support. * The main source of information about types is the [[scala.reflect.api.Types]] page. * @group API */ trait BoundedWildcardTypeApi extends TypeApi { this: BoundedWildcardType => /** Type bounds for the wildcard type. */ def bounds: TypeBounds } /** The least upper bound of a list of types, as determined by `<:<`. * @group TypeOps */ def lub(xs: List[Type]): Type /** The greatest lower bound of a list of types, as determined by `<:<`. * @group TypeOps */ def glb(ts: List[Type]): Type /** A creator for type applications. * * Useful to combine and create types out of generic ones. For example: * * {{{ * scala> val boolType = typeOf[Boolean] * boolType: reflect.runtime.universe.Type = Boolean * * scala> val optionType = typeOf[Option[_]] * optionType: reflect.runtime.universe.Type = Option[_] * * scala> appliedType(optionType.typeConstructor, boolType) * res0: reflect.runtime.universe.Type = Option[Boolean] * }}} * * @group TypeOps */ def appliedType(tycon: Type, args: List[Type]): Type /** @see [[appliedType]] */ def appliedType(tycon: Type, args: Type*): Type /** @see [[appliedType]] */ def appliedType(sym: Symbol, args: List[Type]): Type /** @see [[appliedType]] */ def appliedType(sym: Symbol, args: Type*): Type }
scala/scala
src/reflect/scala/reflect/api/Types.scala
Scala
apache-2.0
39,567
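The Scaladoc in the Types.scala entry above revolves around a handful of entry points: typeOf, appliedType, the <:< and =:= operators, dealias, and member lookup. Here is a small self-contained sketch tying them together; it assumes Scala 2.11+ with the scala-reflect module on the classpath, and the printed representations vary slightly by version.

import scala.reflect.runtime.universe._

object TypesApiSketch {
  type Ints = List[Int] // an alias, to contrast dealias with =:=

  def main(args: Array[String]): Unit = {
    val intTpe  = typeOf[Int]
    val listTpe = typeOf[List[_]]

    // Rebuild List[Int] from the type constructor and a type argument.
    val listOfInt = appliedType(listTpe.typeConstructor, intTpe)

    // Conformance and equivalence: use <:< and =:=, never ==.
    println(listOfInt <:< typeOf[Seq[Int]])      // true
    println(typeOf[Ints] =:= typeOf[List[Int]])  // true, despite the alias
    println(typeOf[Ints].dealias)                // List[Int]

    // Member lookup, as in the `map` example in the Scaladoc.
    println(listOfInt.member(TermName("map")))   // method map
  }
}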
package io.eels.component.parquet import java.nio.{ByteBuffer, ByteOrder} import java.time._ import java.time.temporal.ChronoUnit import com.sksamuel.exts.Logging import io.eels.coercion._ import io.eels.schema._ import org.apache.parquet.io.api.{Binary, RecordConsumer} import scala.math.BigDecimal.RoundingMode.RoundingMode // accepts a scala/java value and writes it out to a record consumer as // the appropriate parquet type trait RecordWriter { def write(record: RecordConsumer, value: Any): Unit } object RecordWriter { def apply(dataType: DataType, roundingMode: RoundingMode): RecordWriter = { dataType match { case ArrayType(elementType) => new ArrayRecordWriter(RecordWriter(elementType, roundingMode)) case BinaryType => BinaryParquetWriter case BigIntType => BigIntRecordWriter case BooleanType => BooleanRecordWriter case CharType(_) => StringRecordWriter case DateType => DateRecordWriter case DecimalType(precision, scale) => new DecimalWriter(precision, scale, roundingMode) case DoubleType => DoubleRecordWriter case FloatType => FloatRecordWriter case _: IntType => IntRecordWriter case _: LongType => LongRecordWriter case _: ShortType => ShortParquetWriter case mapType@MapType(keyType, valueType) => new MapRecordWriter(mapType, apply(keyType, roundingMode), apply(valueType, roundingMode)) case StringType => StringRecordWriter case struct: StructType => new StructRecordWriter(struct, roundingMode, true) case TimeMillisType => TimeRecordWriter case TimestampMillisType => TimestampRecordWriter case VarcharType(_) => StringRecordWriter } } } class MapRecordWriter(mapType: MapType, keyWriter: RecordWriter, valueWriter: RecordWriter) extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { val map = MapCoercer.coerce(value) record.startGroup() record.startField("key_value", 0) map.foreach { case (key, v) => record.startGroup() record.startField("key", 0) keyWriter.write(record, key) record.endField("key", 0) record.startField("value", 1) valueWriter.write(record, v) record.endField("value", 1) record.endGroup() } record.endField("key_value", 0) record.endGroup() } } class ArrayRecordWriter(nested: RecordWriter) extends RecordWriter with Logging { override def write(record: RecordConsumer, value: Any): Unit = { val seq = SequenceCoercer.coerce(value) // this layout follows the spark style, an array is a group of a single element called list, which itself // contains repeated groups which contain another record called element record.startGroup() record.startField("list", 0) seq.foreach { x => record.startGroup() record.startField("element", 0) nested.write(record, x) record.endField("element", 0) record.endGroup() } record.endField("list", 0) record.endGroup() } } class StructRecordWriter(structType: StructType, roundingMode: RoundingMode, nested: Boolean // nested groups, ie not the outer record, must be handled differently ) extends RecordWriter with Logging { val writers = structType.fields.map(_.dataType).map(RecordWriter.apply(_, roundingMode)) override def write(record: RecordConsumer, value: Any): Unit = { require(record != null) if (nested) record.startGroup() val values = SequenceCoercer.coerce(value) for (k <- structType.fields.indices) { val value = values(k) // if a value is null then parquet requires us to completely skip the field if (value != null) { val field = structType.field(k) record.startField(field.name, k) val writer = writers(k) writer.write(record, value) record.endField(field.name, k) } } if (nested) record.endGroup() } } object 
BinaryParquetWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { value match { case array: Array[Byte] => record.addBinary(Binary.fromReusedByteArray(array)) case seq: Seq[Byte] => write(record, seq.toArray) } } } // The scale stores the number of digits of that value that are to the right of the decimal point, // and the precision stores the maximum number of sig digits supported in the unscaled value. class DecimalWriter(precision: Precision, scale: Scale, roundingMode: RoundingMode) extends RecordWriter { private val byteSizeForPrecision = ParquetSchemaFns.byteSizeForPrecision(precision.value) override def write(record: RecordConsumer, value: Any): Unit = { val bd = BigDecimalCoercer.coerce(value) .setScale(scale.value, roundingMode) .underlying() record.addBinary(decimalAsBinary(bd, bd.unscaledValue())) } import org.apache.parquet.io.api.Binary private def decimalAsBinary(original: java.math.BigDecimal, unscaled: java.math.BigInteger): Binary = { val bytes = unscaled.toByteArray if (bytes.length == byteSizeForPrecision) Binary.fromReusedByteArray(bytes) else if (bytes.length < byteSizeForPrecision) { val decimalBuffer = new Array[Byte](byteSizeForPrecision) // For negatives all high bits need to be 1 hence -1 used val signByte = if (unscaled.signum < 0) -1: Byte else 0: Byte java.util.Arrays.fill(decimalBuffer, 0, decimalBuffer.length - bytes.length, signByte) System.arraycopy(bytes, 0, decimalBuffer, decimalBuffer.length - bytes.length, bytes.length) Binary.fromReusedByteArray(decimalBuffer) } else throw new IllegalStateException(s"Decimal precision too small, value=$original, precision=${precision.value}") } } object BigIntRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addLong(BigIntegerCoercer.coerce(value).longValue) } } object DateRecordWriter extends RecordWriter { private val UnixEpoch = LocalDate.of(1970, 1, 1) // should write out number of days since unix epoch override def write(record: RecordConsumer, value: Any): Unit = { value match { case date: java.sql.Date => val local = Instant.ofEpochMilli(date.getTime).atZone(ZoneId.systemDefault).toLocalDate val days = ChronoUnit.DAYS.between(UnixEpoch, local) record.addInteger(days.toInt) } } } object TimeRecordWriter extends RecordWriter { private val JulianEpochInGregorian = LocalDateTime.of(-4713, 11, 24, 0, 0, 0) // first 8 bytes are the nanoseconds // second 4 bytes are the days override def write(record: RecordConsumer, value: Any): Unit = { val timestamp = TimestampCoercer.coerce(value) val nanos = timestamp.getNanos val dt = Instant.ofEpochMilli(timestamp.getTime).atZone(ZoneId.systemDefault) val days = ChronoUnit.DAYS.between(JulianEpochInGregorian, dt).toInt val bytes = ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN).putLong(nanos).putInt(days) val binary = Binary.fromReusedByteBuffer(bytes) record.addBinary(binary) } } object TimestampRecordWriter extends RecordWriter { private val JulianEpochInGregorian = LocalDateTime.of(-4713, 11, 24, 0, 0, 0) override def write(record: RecordConsumer, value: Any): Unit = { val timestamp = TimestampCoercer.coerce(value) val dt = Instant.ofEpochMilli(timestamp.getTime).atZone(ZoneId.systemDefault) val days = ChronoUnit.DAYS.between(JulianEpochInGregorian, dt).toInt val nanos = timestamp.getNanos + ChronoUnit.NANOS.between(dt.toLocalDate.atStartOfDay(ZoneId.systemDefault).toLocalTime, dt.toLocalTime) val bytes = 
ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN).putLong(nanos).putInt(days).array() val binary = Binary.fromReusedByteArray(bytes) record.addBinary(binary) } } object StringRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addBinary(Binary.fromString(StringCoercer.coerce(value))) } } object ShortParquetWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addInteger(ShortCoercer.coerce(value)) } } object DoubleRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addDouble(DoubleCoercer.coerce(value)) } } object FloatRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addFloat(FloatCoercer.coerce(value)) } } object BooleanRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addBoolean(BooleanCoercer.coerce(value)) } } object LongRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addLong(LongCoercer.coerce(value)) } } object IntRecordWriter extends RecordWriter { override def write(record: RecordConsumer, value: Any): Unit = { record.addInteger(IntCoercer.coerce(value)) } }
sksamuel/eel-sdk
eel-core/src/main/scala/io/eels/component/parquet/RecordWriter.scala
Scala
apache-2.0
9,192
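The trickiest part of the RecordWriter entry above is DecimalWriter, which writes a decimal as a fixed-width two's-complement byte array left-padded with the sign byte. The following stand-alone sketch shows just that padding step, with the repo-specific ParquetSchemaFns.byteSizeForPrecision replaced by an explicit width argument.

import java.math.BigInteger

object DecimalPaddingSketch {
  // Pad a two's-complement byte array to a fixed width by replicating the sign byte,
  // mirroring what DecimalWriter does before handing the bytes to Parquet.
  def pad(unscaled: BigInteger, width: Int): Array[Byte] = {
    val bytes = unscaled.toByteArray
    require(bytes.length <= width, s"value needs ${bytes.length} bytes, only $width available")
    val out = new Array[Byte](width)
    val signByte: Byte = if (unscaled.signum < 0) -1 else 0
    java.util.Arrays.fill(out, 0, width - bytes.length, signByte)
    System.arraycopy(bytes, 0, out, width - bytes.length, bytes.length)
    out
  }

  def main(args: Array[String]): Unit = {
    // 12.34 at scale 2 has unscaled value 1234; pad it to a 5-byte field.
    println(pad(BigInteger.valueOf(1234), 5).mkString(","))   // 0,0,0,4,-46
    println(pad(BigInteger.valueOf(-1234), 5).mkString(","))  // -1,-1,-1,-5,46
  }
}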
package cpup.mc.oldenMagic.api.oldenLanguage.runeParsing

import cpup.mc.oldenMagic.api.oldenLanguage.runes.TRune
import cpup.mc.oldenMagic.api.oldenLanguage.casting.conditions.{TCondition, TConditionType}

trait TConditionRune extends TRune {
  def conditionTypes: Set[TConditionType]
  def filter(conditions: Set[TCondition]): Boolean
}
CoderPuppy/oldenmagic-mc
src/main/scala/cpup/mc/oldenMagic/api/oldenLanguage/runeParsing/TConditionRune.scala
Scala
mit
336
object InitialHamiltonian {
  def construct(n: Int): HybridMatrix = {
    val size = Size(n)
    val positions = IndexedSeq.fill[collection.mutable.Buffer[Int]](size.dim)(collection.mutable.Buffer.empty)

    recurse(size.dim, 0, 0) { (x, y, xOff, yOff) =>
      positions(x + xOff) += y + yOff
    }

    HybridMatrix.withPositions(size, -1, positions)
  }

  def recurse(dim: Int, xOff: Int, yOff: Int)(add: (Int, Int, Int, Int) => Unit): Unit = {
    if (dim == 2) {
      // Pauli(1) matrix
      add(0, 1, xOff, yOff)
      add(1, 0, xOff, yOff)
    } else {
      val half = dim / 2

      // Upper left
      recurse(half, xOff, yOff)(add)

      // Upper right
      (0 until half) map { i => add(i, i, xOff + half, yOff) }

      // Lower left
      (0 until half) map { i => add(i, i, xOff, yOff + half) }

      // Lower right
      recurse(half, xOff + half, yOff + half)(add)
    }
  }
}
stevenheidel/recquasim
scala/src/main/scala/InitialHamiltonian.scala
Scala
apache-2.0
938
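To see what InitialHamiltonian.recurse produces, it helps to run the recursion for dim = 4: the filled positions are exactly the non-zeros of σx⊗I + I⊗σx, that is, the transverse-field pattern summed over qubits. Below is a self-contained sketch that reuses the same recursion but collects positions into plain buffers, since HybridMatrix and Size are specific to that repository.

object InitialHamiltonianSketch {
  // Same recursion as above: dim == 2 is the Pauli X pattern; a larger block is
  // [[M(half), I], [I, M(half)]], so M(2^n) is the sum of sigma_x over all qubits.
  def recurse(dim: Int, xOff: Int, yOff: Int)(add: (Int, Int, Int, Int) => Unit): Unit = {
    if (dim == 2) {
      add(0, 1, xOff, yOff)
      add(1, 0, xOff, yOff)
    } else {
      val half = dim / 2
      recurse(half, xOff, yOff)(add)
      (0 until half).foreach(i => add(i, i, xOff + half, yOff))
      (0 until half).foreach(i => add(i, i, xOff, yOff + half))
      recurse(half, xOff + half, yOff + half)(add)
    }
  }

  def main(args: Array[String]): Unit = {
    val dim = 4 // two qubits
    val positions = IndexedSeq.fill(dim)(collection.mutable.Buffer.empty[Int])
    recurse(dim, 0, 0)((x, y, xOff, yOff) => positions(x + xOff) += y + yOff)
    positions.zipWithIndex.foreach { case (cols, row) =>
      println(s"row $row -> columns ${cols.sorted.mkString(", ")}")
    }
    // Expected: row 0 -> 1, 2; row 1 -> 0, 3; row 2 -> 0, 3; row 3 -> 1, 2
  }
}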
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples

import org.apache.spark.SparkContext

object BroadcastTest {
  def main(args: Array[String]) {
    if (args.length == 0) {
      System.err.println("Usage: BroadcastTest <master> [slices] [numElem] [broadcastAlgo]" +
        " [blockSize]")
      System.exit(1)
    }

    val bcName = if (args.length > 3) args(3) else "Http"
    val blockSize = if (args.length > 4) args(4) else "4096"

    System.setProperty("spark.broadcast.factory", "org.apache.spark.broadcast." + bcName + "BroadcastFactory")
    System.setProperty("spark.broadcast.blockSize", blockSize)

    val sc = new SparkContext(args(0), "Broadcast Test",
      System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass))

    val slices = if (args.length > 1) args(1).toInt else 2
    val num = if (args.length > 2) args(2).toInt else 1000000

    val arr1 = new Array[Int](num)
    for (i <- 0 until arr1.length) {
      arr1(i) = i
    }

    for (i <- 0 until 3) {
      println("Iteration " + i)
      println("===========")
      val startTime = System.nanoTime
      val barr1 = sc.broadcast(arr1)
      val observedSizes = sc.parallelize(1 to 10, slices).map(_ => barr1.value.size)
      // Collect the small RDD so we can print the observed sizes locally.
      observedSizes.collect().foreach(i => println(i))
      println("Iteration %d took %.0f milliseconds".format(i, (System.nanoTime - startTime) / 1E6))
    }

    System.exit(0)
  }
}
sryza/spark
examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
Scala
apache-2.0
2,269
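The core of BroadcastTest above is the broadcast-then-read pattern: ship the array to every executor once and have tasks read barr1.value instead of capturing the array in each closure. The sketch below strips the example down to that pattern using only calls that already appear in the file (including the same old-style SparkContext constructor); "local[2]" is an assumed master URL for local experimentation, where the original reads the master from args(0).

import org.apache.spark.SparkContext

object BroadcastSketch {
  def main(args: Array[String]): Unit = {
    // "local[2]" is an assumption for local runs; the original example takes the master as an argument.
    val sc = new SparkContext("local[2]", "Broadcast Sketch",
      System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass))

    val lookup = (0 until 1000000).toArray
    val blookup = sc.broadcast(lookup) // sent to each executor once

    // Each task reads the broadcast value instead of serialising `lookup` into its closure.
    val sizes = sc.parallelize(1 to 10, 2).map(_ => blookup.value.length).collect()
    sizes.foreach(println)

    sc.stop()
  }
}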
package scala.collection package mutable /** This class implements mutable maps with `Long` keys based on a hash table with open addressing. * * Basic map operations on single entries, including `contains` and `get`, * are typically substantially faster with `LongMap` than [[HashMap]]. Methods * that act on the whole map, including `foreach` and `map` are not in * general expected to be faster than with a generic map, save for those * that take particular advantage of the internal structure of the map: * `foreachKey`, `foreachValue`, `mapValuesNow`, and `transformValues`. * * Maps with open addressing may become less efficient at lookup after * repeated addition/removal of elements. Although `LongMap` makes a * decent attempt to remain efficient regardless, calling `repack` * on a map that will no longer have elements removed but will be * used heavily may save both time and storage space. * * This map is not intended to contain more than 2^29 entries (approximately * 500 million). The maximum capacity is 2^30, but performance will degrade * rapidly as 2^30 is approached. * */ @SerialVersionUID(3L) final class LongMap[V] private[collection] (defaultEntry: Long => V, initialBufferSize: Int, initBlank: Boolean) extends AbstractMap[Long, V] with MapOps[Long, V, Map, LongMap[V]] with StrictOptimizedIterableOps[(Long, V), Iterable, LongMap[V]] with Serializable { import LongMap._ def this() = this(LongMap.exceptionDefault, 16, true) def clear(): Unit = { keysIterator() foreach -= } // TODO optimize override protected def fromSpecificIterable(coll: scala.collection.Iterable[(Long, V)]): LongMap[V] = { //TODO should this be the default implementation of this method in StrictOptimizedIterableOps? val b = newSpecificBuilder() b.sizeHint(coll) b.addAll(coll) b.result() } override protected def newSpecificBuilder(): Builder[(Long, V),LongMap[V]] = new GrowableBuilder(LongMap.empty[V]) /** Creates a new `LongMap` that returns default values according to a supplied key-value mapping. */ def this(defaultEntry: Long => V) = this(defaultEntry, 16, true) /** Creates a new `LongMap` with an initial buffer of specified size. * * A LongMap can typically contain half as many elements as its buffer size * before it requires resizing. */ def this(initialBufferSize: Int) = this(LongMap.exceptionDefault, initialBufferSize, true) /** Creates a new `LongMap` with specified default values and initial buffer size. 
*/ def this(defaultEntry: Long => V, initialBufferSize: Int) = this(defaultEntry, initialBufferSize, true) private[this] var mask = 0 private[this] var extraKeys: Int = 0 private[this] var zeroValue: AnyRef = null private[this] var minValue: AnyRef = null private[this] var _size = 0 private[this] var _vacant = 0 private[this] var _keys: Array[Long] = null private[this] var _values: Array[AnyRef] = null if (initBlank) defaultInitialize(initialBufferSize) private[this] def defaultInitialize(n: Int) = { mask = if (n<0) 0x7 else (((1 << (32 - java.lang.Integer.numberOfLeadingZeros(n-1))) - 1) & 0x3FFFFFFF) | 0x7 _keys = new Array[Long](mask+1) _values = new Array[AnyRef](mask+1) } private[collection] def initializeTo( m: Int, ek: Int, zv: AnyRef, mv: AnyRef, sz: Int, vc: Int, kz: Array[Long], vz: Array[AnyRef] ): Unit = { mask = m; extraKeys = ek; zeroValue = zv; minValue = mv; _size = sz; _vacant = vc; _keys = kz; _values = vz } override def size: Int = _size + (extraKeys+1)/2 override def empty: LongMap[V] = new LongMap() private def imbalanced: Boolean = (_size + _vacant) > 0.5*mask || _vacant > _size private def toIndex(k: Long): Int = { // Part of the MurmurHash3 32 bit finalizer val h = ((k ^ (k >>> 32)) & 0xFFFFFFFFL).toInt val x = (h ^ (h >>> 16)) * 0x85EBCA6B (x ^ (x >>> 13)) & mask } private def seekEmpty(k: Long): Int = { var e = toIndex(k) var x = 0 while (_keys(e) != 0) { x += 1; e = (e + 2*(x+1)*x - 3) & mask } e } private def seekEntry(k: Long): Int = { var e = toIndex(k) var x = 0 var q = 0L while ({ q = _keys(e); if (q==k) return e; q != 0}) { x += 1; e = (e + 2*(x+1)*x - 3) & mask } e | MissingBit } private def seekEntryOrOpen(k: Long): Int = { var e = toIndex(k) var x = 0 var q = 0L while ({ q = _keys(e); if (q==k) return e; q+q != 0}) { x += 1 e = (e + 2*(x+1)*x - 3) & mask } if (q == 0) return e | MissingBit val o = e | MissVacant while ({ q = _keys(e); if (q==k) return e; q != 0}) { x += 1 e = (e + 2*(x+1)*x - 3) & mask } o } override def contains(key: Long): Boolean = { if (key == -key) (((key>>>63).toInt+1) & extraKeys) != 0 else seekEntry(key) >= 0 } override def get(key: Long): Option[V] = { if (key == -key) { if ((((key>>>63).toInt+1) & extraKeys) == 0) None else if (key == 0) Some(zeroValue.asInstanceOf[V]) else Some(minValue.asInstanceOf[V]) } else { val i = seekEntry(key) if (i < 0) None else Some(_values(i).asInstanceOf[V]) } } override def getOrElse[V1 >: V](key: Long, default: => V1): V1 = { if (key == -key) { if ((((key>>>63).toInt+1) & extraKeys) == 0) default else if (key == 0) zeroValue.asInstanceOf[V1] else minValue.asInstanceOf[V1] } else { val i = seekEntry(key) if (i < 0) default else _values(i).asInstanceOf[V1] } } override def getOrElseUpdate(key: Long, defaultValue: => V): V = { if (key == -key) { val kbits = (key>>>63).toInt + 1 if ((kbits & extraKeys) == 0) { val value = defaultValue extraKeys |= kbits if (key == 0) zeroValue = value.asInstanceOf[AnyRef] else minValue = value.asInstanceOf[AnyRef] value } else if (key == 0) zeroValue.asInstanceOf[V] else minValue.asInstanceOf[V] } else { var i = seekEntryOrOpen(key) if (i < 0) { // It is possible that the default value computation was side-effecting // Our hash table may have resized or even contain what we want now // (but if it does, we'll replace it) val value = { val ok = _keys val ans = defaultValue if (ok ne _keys) { i = seekEntryOrOpen(key) if (i >= 0) _size -= 1 } ans } _size += 1 val j = i & IndexMask _keys(j) = key _values(j) = value.asInstanceOf[AnyRef] if ((i & VacantBit) != 0) _vacant 
-= 1 else if (imbalanced) repack() value } else _values(i).asInstanceOf[V] } } /** Retrieves the value associated with a key, or the default for that type if none exists * (null for AnyRef, 0 for floats and integers). * * Note: this is the fastest way to retrieve a value that may or * may not exist, if the default null/zero is acceptable. For key/value * pairs that do exist, `apply` (i.e. `map(key)`) is equally fast. */ def getOrNull(key: Long): V = { if (key == -key) { if ((((key>>>63).toInt+1) & extraKeys) == 0) null.asInstanceOf[V] else if (key == 0) zeroValue.asInstanceOf[V] else minValue.asInstanceOf[V] } else { val i = seekEntry(key) if (i < 0) null.asInstanceOf[V] else _values(i).asInstanceOf[V] } } /** Retrieves the value associated with a key. * If the key does not exist in the map, the `defaultEntry` for that key * will be returned instead. */ override def apply(key: Long): V = { if (key == -key) { if ((((key>>>63).toInt+1) & extraKeys) == 0) defaultEntry(key) else if (key == 0) zeroValue.asInstanceOf[V] else minValue.asInstanceOf[V] } else { val i = seekEntry(key) if (i < 0) defaultEntry(key) else _values(i).asInstanceOf[V] } } /** The user-supplied default value for the key. Throws an exception * if no other default behavior was specified. */ override def default(key: Long) = defaultEntry(key) private def repack(newMask: Int): Unit = { val ok = _keys val ov = _values mask = newMask _keys = new Array[Long](mask+1) _values = new Array[AnyRef](mask+1) _vacant = 0 var i = 0 while (i < ok.length) { val k = ok(i) if (k != -k) { val j = seekEmpty(k) _keys(j) = k _values(j) = ov(i) } i += 1 } } /** Repacks the contents of this `LongMap` for maximum efficiency of lookup. * * For maps that undergo a complex creation process with both addition and * removal of keys, and then are used heavily with no further removal of * elements, calling `repack` after the end of the creation can result in * improved performance. Repacking takes time proportional to the number * of entries in the map. */ def repack(): Unit = { var m = mask if (_size + _vacant >= 0.5*mask && !(_vacant > 0.2*mask)) m = ((m << 1) + 1) & IndexMask while (m > 8 && 8*_size < m) m = m >>> 1 repack(m) } override def put(key: Long, value: V): Option[V] = { if (key == -key) { if (key == 0) { val ans = if ((extraKeys&1) == 1) Some(zeroValue.asInstanceOf[V]) else None zeroValue = value.asInstanceOf[AnyRef] extraKeys |= 1 ans } else { val ans = if ((extraKeys&2) == 1) Some(minValue.asInstanceOf[V]) else None minValue = value.asInstanceOf[AnyRef] extraKeys |= 2 ans } } else { val i = seekEntryOrOpen(key) if (i < 0) { val j = i & IndexMask _keys(j) = key _values(j) = value.asInstanceOf[AnyRef] _size += 1 if ((i & VacantBit) != 0) _vacant -= 1 else if (imbalanced) repack() None } else { val ans = Some(_values(i).asInstanceOf[V]) _keys(i) = key _values(i) = value.asInstanceOf[AnyRef] ans } } } /** Updates the map to include a new key-value pair. * * This is the fastest way to add an entry to a `LongMap`. 
*/ override def update(key: Long, value: V): Unit = { if (key == -key) { if (key == 0) { zeroValue = value.asInstanceOf[AnyRef] extraKeys |= 1 } else { minValue = value.asInstanceOf[AnyRef] extraKeys |= 2 } } else { val i = seekEntryOrOpen(key) if (i < 0) { val j = i & IndexMask _keys(j) = key _values(j) = value.asInstanceOf[AnyRef] _size += 1 if ((i & VacantBit) != 0) _vacant -= 1 else if (imbalanced) repack() } else { _keys(i) = key _values(i) = value.asInstanceOf[AnyRef] } } } /** Adds a new key/value pair to this map and returns the map. */ def +=(key: Long, value: V): this.type = { update(key, value); this } override def addOne(kv: (Long, V)): this.type = { update(kv._1, kv._2); this } def subtractOne(key: Long): this.type = { if (key == -key) { if (key == 0L) { extraKeys &= 0x2 zeroValue = null } else { extraKeys &= 0x1 minValue = null } } else { val i = seekEntry(key) if (i >= 0) { _size -= 1 _vacant += 1 _keys(i) = Long.MinValue _values(i) = null } } this } def iterator(): Iterator[(Long, V)] = new Iterator[(Long, V)] { private[this] val kz = _keys private[this] val vz = _values private[this] var nextPair: (Long, V) = if (extraKeys==0) null else if ((extraKeys&1)==1) (0L, zeroValue.asInstanceOf[V]) else (Long.MinValue, minValue.asInstanceOf[V]) private[this] var anotherPair: (Long, V) = if (extraKeys==3) (Long.MinValue, minValue.asInstanceOf[V]) else null private[this] var index = 0 def hasNext: Boolean = nextPair != null || (index < kz.length && { var q = kz(index) while (q == -q) { index += 1 if (index >= kz.length) return false q = kz(index) } nextPair = (kz(index), vz(index).asInstanceOf[V]) index += 1 true }) def next() = { if (nextPair == null && !hasNext) throw new NoSuchElementException("next") val ans = nextPair if (anotherPair != null) { nextPair = anotherPair anotherPair = null } else nextPair = null ans } } override def foreach[U](f: ((Long,V)) => U): Unit = { if ((extraKeys & 1) == 1) f((0L, zeroValue.asInstanceOf[V])) if ((extraKeys & 2) == 2) f((Long.MinValue, minValue.asInstanceOf[V])) var i,j = 0 while (i < _keys.length & j < _size) { val k = _keys(i) if (k != -k) { j += 1 f((k, _values(i).asInstanceOf[V])) } i += 1 } } override def clone(): LongMap[V] = { val kz = java.util.Arrays.copyOf(_keys, _keys.length) val vz = java.util.Arrays.copyOf(_values, _values.length) val lm = new LongMap[V](defaultEntry, 1, false) lm.initializeTo(mask, extraKeys, zeroValue, minValue, _size, _vacant, kz, vz) lm } /* override def +[V1 >: V](kv: (Long, V1)): LongMap[V1] = { val lm = clone().asInstanceOf[LongMap[V1]] lm += kv lm } */ override def concat[V1 >: V](xs: scala.collection.Iterable[(Long, V1)]): LongMap[V1] = { val lm = clone().asInstanceOf[LongMap[V1]] xs.foreach(kv => lm += kv) lm } override def ++ [V1 >: V](xs: scala.collection.Iterable[(Long, V1)]): LongMap[V1] = concat(xs) @deprecated("Use LongMap.from(m).add(k,v) instead of m.updated(k, v)", "2.13.0") def updated[V1 >: V](key: Long, value: V1): LongMap[V1] = { val lm = clone().asInstanceOf[LongMap[V1]] lm += (key, value) lm } /** Applies a function to all keys of this map. */ def foreachKey[A](f: Long => A): Unit = { if ((extraKeys & 1) == 1) f(0L) if ((extraKeys & 2) == 2) f(Long.MinValue) var i,j = 0 while (i < _keys.length & j < _size) { val k = _keys(i) if (k != -k) { j += 1 f(k) } i += 1 } } /** Applies a function to all values of this map. 
*/ def foreachValue[A](f: V => A): Unit = { if ((extraKeys & 1) == 1) f(zeroValue.asInstanceOf[V]) if ((extraKeys & 2) == 2) f(minValue.asInstanceOf[V]) var i,j = 0 while (i < _keys.length & j < _size) { val k = _keys(i) if (k != -k) { j += 1 f(_values(i).asInstanceOf[V]) } i += 1 } } /** Creates a new `LongMap` with different values. * Unlike `mapValues`, this method generates a new * collection immediately. */ def mapValuesNow[V1](f: V => V1): LongMap[V1] = { val zv = if ((extraKeys & 1) == 1) f(zeroValue.asInstanceOf[V]).asInstanceOf[AnyRef] else null val mv = if ((extraKeys & 2) == 2) f(minValue.asInstanceOf[V]).asInstanceOf[AnyRef] else null val lm = new LongMap[V1](LongMap.exceptionDefault, 1, false) val kz = java.util.Arrays.copyOf(_keys, _keys.length) val vz = new Array[AnyRef](_values.length) var i,j = 0 while (i < _keys.length & j < _size) { val k = _keys(i) if (k != -k) { j += 1 vz(i) = f(_values(i).asInstanceOf[V]).asInstanceOf[AnyRef] } i += 1 } lm.initializeTo(mask, extraKeys, zv, mv, _size, _vacant, kz, vz) lm } /** Applies a transformation function to all values stored in this map. * Note: the default, if any, is not transformed. */ def transformValues(f: V => V): this.type = { if ((extraKeys & 1) == 1) zeroValue = f(zeroValue.asInstanceOf[V]).asInstanceOf[AnyRef] if ((extraKeys & 2) == 2) minValue = f(minValue.asInstanceOf[V]).asInstanceOf[AnyRef] var i,j = 0 while (i < _keys.length & j < _size) { val k = _keys(i) if (k != -k) { j += 1 _values(i) = f(_values(i).asInstanceOf[V]).asInstanceOf[AnyRef] } i += 1 } this } def map[V2](f: ((Long, V)) => (Long, V2)): LongMap[V2] = LongMap.from(new View.Map(coll, f)) def flatMap[V2](f: ((Long, V)) => IterableOnce[(Long, V2)]): LongMap[V2] = LongMap.from(new View.FlatMap(coll, f)) def collect[V2](pf: PartialFunction[(Long, V), (Long, V2)]): LongMap[V2] = flatMap(kv => if (pf.isDefinedAt(kv)) new View.Single(pf(kv)) else View.Empty) } object LongMap { private final val IndexMask = 0x3FFFFFFF private final val MissingBit = 0x80000000 private final val VacantBit = 0x40000000 private final val MissVacant = 0xC0000000 private val exceptionDefault: Long => Nothing = (k: Long) => throw new NoSuchElementException(k.toString) /* implicit def canBuildFrom[V, U]: CanBuildFrom[LongMap[V], (Long, U), LongMap[U]] = new CanBuildFrom[LongMap[V], (Long, U), LongMap[U]] { def apply(from: LongMap[V]): LongMapBuilder[U] = apply() def apply(): LongMapBuilder[U] = new LongMapBuilder[U] } */ /** A builder for instances of `LongMap`. * * This builder can be reused to create multiple instances. */ final class LongMapBuilder[V] extends ReusableBuilder[(Long, V), LongMap[V]] { private[collection] var elems: LongMap[V] = new LongMap[V] override def addOne(entry: (Long, V)): this.type = { elems += entry this } def clear(): Unit = elems = new LongMap[V] def result(): LongMap[V] = elems } /** Creates a new `LongMap` with zero or more key/value pairs. */ def apply[V](elems: (Long, V)*): LongMap[V] = buildFromIterableOnce(elems) private def buildFromIterableOnce[V](elems: IterableOnce[(Long, V)]): LongMap[V] = { var sz = elems.knownSize if(sz < 0) sz = 4 val lm = new LongMap[V](sz * 2) elems.iterator().foreach{ case (k,v) => lm(k) = v } if (lm.size < (sz>>3)) lm.repack() lm } /** Creates a new empty `LongMap`. */ def empty[V]: LongMap[V] = new LongMap[V] /** Creates a new empty `LongMap` with the supplied default */ def withDefault[V](default: Long => V): LongMap[V] = new LongMap[V](default) /** Creates a new `LongMap` from an existing source collection. 
A source collection * which is already a `LongMap` gets cloned. * * @param source Source collection * @tparam A the type of the collection’s elements * @return a new `LongMap` with the elements of `source` */ def from[V](source: IterableOnce[(Long, V)]): LongMap[V] = source match { case source: LongMap[_] => source.clone().asInstanceOf[LongMap[V]] case _ => buildFromIterableOnce(source) } /** Creates a new `LongMap` from arrays of keys and values. * Equivalent to but more efficient than `LongMap((keys zip values): _*)`. */ def fromZip[V](keys: Array[Long], values: Array[V]): LongMap[V] = { val sz = math.min(keys.length, values.length) val lm = new LongMap[V](sz * 2) var i = 0 while (i < sz) { lm(keys(i)) = values(i); i += 1 } if (lm.size < (sz>>3)) lm.repack() lm } /** Creates a new `LongMap` from keys and values. * Equivalent to but more efficient than `LongMap((keys zip values): _*)`. */ def fromZip[V](keys: scala.collection.Iterable[Long], values: scala.collection.Iterable[V]): LongMap[V] = { val sz = math.min(keys.size, values.size) val lm = new LongMap[V](sz * 2) val ki = keys.iterator() val vi = values.iterator() while (ki.hasNext && vi.hasNext) lm(ki.next()) = vi.next() if (lm.size < (sz >> 3)) lm.repack() lm } }
rorygraves/perf_tester
corpus/scala-library/src/main/scala/collection/mutable/LongMap.scala
Scala
apache-2.0
19,776
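Illustrative usage sketch for the mutable LongMap above. It relies only on members defined in that file (the companion `apply`, `update`, `foreachKey`, `mapValuesNow`) and assumes the class is on the classpath under its declaring package; the object name `LongMapSketch` is invented for the example.

object LongMapSketch {
  def main(args: Array[String]): Unit = {
    // companion apply builds from (Long, V) pairs; 0L and Long.MinValue use the special slots
    val m = LongMap(1L -> "one", 2L -> "two", Long.MinValue -> "min")
    m(0L) = "zero"                           // update, routed to the zeroValue slot
    m(3L) = "three"                          // regular open-addressed entry
    m.foreachKey(k => println(s"key $k"))
    val lengths = m.mapValuesNow(_.length)   // eager value transform -> LongMap[Int]
    println(lengths.size)                    // 5
  }
}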
/* * Copyright (C) 2015 Romain Reuillon * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.openmole.plugin.method.modelfamily.ga import fr.iscpif.mgo._ import scala.util.Random trait ModelFamilyMutation <: DynamicGAMutation with ModelFamilyGenome { def changeModel = 0.1 override def mutate(genome: G, population: Population[G, P, F], archive: A)(implicit rng: Random): G = { def mutated = super.mutate(genome, population, archive) if (rng.nextDouble < changeModel) modelId.set(rng.nextInt(models))(mutated) else mutated } }
ISCPIF/PSEExperiments
openmole-src/openmole/plugins/org.openmole.plugin.method.modelfamily/src/main/scala/org/openmole/plugin/method/modelfamily/ga/ModelFamilyMutation.scala
Scala
agpl-3.0
1,168
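A self-contained illustration of the mutation policy in ModelFamilyMutation above: with probability `changeModel` the mutated genome's model id is re-drawn uniformly, otherwise the ordinary mutation result is kept unchanged. mgo's lens-based genome types are replaced here by a plain case class purely for demonstration; `Genome`, `mutateWeights` and `ModelSwitchSketch` are invented names.

import scala.util.Random

final case class Genome(modelId: Int, weights: Vector[Double])

object ModelSwitchSketch {
  def mutate(g: Genome, models: Int, changeModel: Double)
            (regular: Genome => Genome)(implicit rng: Random): Genome = {
    val mutated = regular(g)                 // the "super.mutate" step
    if (rng.nextDouble() < changeModel) mutated.copy(modelId = rng.nextInt(models))
    else mutated
  }

  def main(args: Array[String]): Unit = {
    implicit val rng: Random = new Random(42)
    val g = Genome(modelId = 0, weights = Vector(0.1, 0.9))
    val mutateWeights: Genome => Genome =
      x => x.copy(weights = x.weights.map(_ + rng.nextGaussian() * 0.01))
    println(mutate(g, models = 5, changeModel = 0.1)(mutateWeights))
  }
}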
package info.hupel.isabelle import scala.concurrent._ import scala.concurrent.duration._ import scala.util.control.Exception._ import cats.arrow.FunctionK import monix.execution.{Cancelable, CancelableFuture, FutureUtils, Scheduler} import org.log4s._ import info.hupel.isabelle.api._ /** Functions to build and create [[System systems]]. */ object System { private val logger = getLogger object StartupException { sealed abstract class Reason(val explain: String) case object Exited extends Reason("exited (session not built?)") case object NoPong extends Reason("ping operation timeout (wrong session?)") } case class StartupException(reason: StartupException.Reason) extends RuntimeException(s"System startup failed: ${reason.explain}") /** * Synchronously build a * [[info.hupel.isabelle.api.Configuration configuration]]. * * This operation is idempotent, but not parallel-safe. It must not be running * simultaneously for the same [[info.hupel.isabelle.setup.Setup setup]], not * even on different JVMs or with differnet configurations. Parallel * invocations of `[[create]]` should be avoided, but are safe under the * condition that they are using independent configurations, or that the * common ancestors of the configurations have already been successfully * built. Refer to the * [[info.hupel.isabelle.api.Configuration documentation of configurations]] * for more details. * * A `true` return value indicates a successful build. Currently, there is no * way to elaborate on a failed build. * * In the background, this will spawn a prover instance running in a special, * non-interactive mode. Build products will be put into `~/.isabelle` on the * file system. */ def build(env: Environment, config: Configuration): Boolean = env.build(config) == 0 private final case class OperationState[T]( env: Environment, observer: Observer[T], firehose: Boolean = false, promise: Promise[ProverResult[T]] = Promise[ProverResult[T]] ) { self => class Output(name: String) { def unapply(markup: Markup): Option[Long] = markup match { case (env.protocolTag, (env.functionTag, `name`) :: ("id", id) :: Nil) => catching(classOf[NumberFormatException]) opt id.toLong case _ => None } } object Response extends Output("libisabelle_response") object Start extends Output("libisabelle_start") object Stop extends Output("libisabelle_stop") def tryComplete() = observer match { case Observer.Success(t) => promise.success(t); true case Observer.Failure(err) => promise.failure(err); true case _ => false } def advance(id: Long, markup: Markup, body: XML.Body) = observer match { case Observer.More(step, finish) => (markup, body) match { case (Response(id1), List(tree)) if id == id1 => copy(observer = finish(tree)) case (Start(id1), _) if id == id1 && !firehose => copy(firehose = true) case (Stop(id1), _) if id == id1 && firehose => copy(firehose = false) case _ if firehose => copy(observer = step(XML.elem(markup, body))) case _ => this } case _ => this } } /** * Asynchronously create a new [[System system]] based on the specified * [[info.hupel.isabelle.api.Configuration configuration]]. * * The behaviour of this function when the given configuration has not been * [[build built]] yet is unspecified. Since building is idempotent, it is * recommended to always build a configuration at least once before creating * a system. * * This function is thread-safe. It is safe to create multiple system * based on the same environment or even configuration. 
It is guaranteed that * they are independent, that is, calling any method will not influence other * systems. * * Build products will be read from `~/.isabelle` on the file system. */ def create(env: Environment, config: Configuration, pingTimeout: FiniteDuration = 5.seconds): Future[System] = { val Ping = Operation.implicitly[Unit, Unit]("ping") import env.scheduler val system = new System(env, config) system.initPromise.future.flatMap { _ => logger.debug("Pinging system ...") val pong = system.invoke(Ping)(()) pong.foreach { _ => logger.debug("Ping operation successful") } val noPong = Future.failed(StartupException(StartupException.NoPong)) FutureUtils.timeoutTo( pong, pingTimeout, system.dispose.flatMap(_ => noPong) ).recoverWith { case _: CancellationException => noPong // Why `noPong` twice? Race condition. // `system.dispose` will cancel all the pending operations (including `pong`), which means // that before `system.dispose` could complete, `pong` will complete (in a cancelled // state). Hence, we "recover" a cancellation at this stage by switching the error cause // to `noPong`. } }.map(_ => system) } } /** * A running instance of a prover. * * This class is thread-safe, that is, running multiple * [[Operation operations]] at the same time is expected and safe. * * @see [[info.hupel.isabelle.setup.Setup]] */ final class System private(val env: Environment, config: Configuration) { private val logger = getLogger /** * The scheduler used internally for bi-directional communication with the * prover. * * Guaranteed to be the same scheduler as the one of the * [[info.hupel.isabelle.api.Environment environment]] used to * [[System.create create]] this system. * * It is fine to use this scheduler for other purposes, for example to * transform the [[scala.concurrent.Future futures]] produced by * [[invoke invoking operations]] (note that `Scheduler` is a subtype of * [[scala.concurrent.ExecutionContext `ExecutionContext`]]). * * Since it is marked as `implicit`, it can be readily imported and used. */ implicit val scheduler: Scheduler = env.scheduler private val initPromise = Promise[Unit] private val exitPromise = Promise[Unit] @volatile private var count = 0L @volatile private var pending = Map.empty[Long, System.OperationState[_]] private def consumer(markup: Markup, body: XML.Body): Unit = (markup, body) match { case ((env.initTag, _), _) => env.sendOptions(session) logger.debug("Session started") eval.foreach(rawEval) initPromise.success(()) () case ((env.exitTag, _), _) => initPromise.tryFailure(System.StartupException(System.StartupException.Exited)) logger.debug("Session terminated") exitPromise.success(()) () case ((tag, _), body) if env.printTags contains tag => body.foreach { tree => logger.trace(s"Output ($tag): ${tree.stripMarkup}") } case _ => synchronized { pending = pending.map { case (id, state) => id -> state.advance(id, markup, body) } .filterNot(_._2.tryComplete()) .toMap } } logger.debug("Starting session ...") private val (session, eval) = env.create(config, consumer) /** * Instruct the prover to shutdown. * * Includes orderly cancellation of all running operations. Pending futures * will be marked as failed. * * It is guaranteed that when the returned [[scala.concurrent.Future future]] * succeeds, the prover has been shut down propertly. * * Calling anything after `dispose` is undefined. The object should not be used * afterwards. 
*/ def dispose: Future[Unit] = { env.dispose(session) pending.foreach { case (_, state) => state.promise.tryFailure(new CancellationException()) } exitPromise.future } def rawEval(ml: String): Unit = env.eval(session, ml) /** * Invoke an [[Operation operation]] on the prover, that is, * [[Codec#encode encode]] the input argument, send it to the prover and * stream the results to the [[Observer observer]] of the operation. * * The returned [[scala.concurrent.Future future]] gets fulfilled when the * observer transitions into either * `[[info.hupel.isabelle.Observer.Success Success]]` or * `[[info.hupel.isabelle.Observer.Failure Failure]]` state. * * In addition to that, the transaction can be cancelled via the `cancel` * method of the `CancelableFuture`. Cancellation entails marking the future * as failed and signalling the prover that the operation should be * interrupted. * * Any well-formed response, even if it is an "error", is treated as a * success. Only ill-formed responses, e.g. due to [[Codec#decode decoding]] * errors, will mark the future as failed. Custom observers may deviate from * this, but it is generally safe to assume that a failed future represents * an internal error (e.g. due to a wrong [[Codec codec]]), whereas a * successful future may contain expected errors (e.g. due to a wrong input * argument or a failing proof). */ def invoke[I, O](operation: Operation[I, O])(arg: I): CancelableFuture[ProverResult[O]] = { val (encoded, observer) = operation.prepare(arg) val state = new System.OperationState(env, observer) state.tryComplete() val count0 = synchronized { val count0 = count pending += (count -> state) count += 1 count0 } val args = List(count0.toString, operation.name, encoded.toYXML) env.sendCommand(session, "libisabelle", args) val promise = state.promise val cancel = Cancelable { () => promise.tryFailure(new CancellationException()) env.sendCommand(session, "libisabelle_cancel", List(count0.toString)) } CancelableFuture(promise.future, cancel) } def run[A](prog: Program[A], thyName: String): CancelableFuture[A] = { val interpreter = new FunctionK[Instruction, CancelableFuture] { def apply[T](instruction: Instruction[T]) = instruction.run(System.this, thyName).map(_.unsafeGet) } prog.foldMap(interpreter) } }
larsrh/libisabelle
modules/libisabelle/src/main/scala/System.scala
Scala
apache-2.0
10,201
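A hedged sketch of the call sequence the System API above is designed for: build the configuration once, create a system, invoke an operation, then dispose. The `Environment` and `Configuration` values come from libisabelle's setup machinery and are assumed to be supplied by the caller; the ping operation and the `import env.scheduler` line mirror what `System.create` itself does.

import scala.concurrent.Future
import info.hupel.isabelle._
import info.hupel.isabelle.api.{Configuration, Environment}

object SystemSketch {
  def pingOnce(env: Environment, config: Configuration): Future[Unit] = {
    import env.scheduler                                  // same implicit scheduler the library uses
    require(System.build(env, config), "session not built")
    val Ping = Operation.implicitly[Unit, Unit]("ping")   // as declared inside System.create
    for {
      sys <- System.create(env, config)                   // async startup + internal ping
      _   <- sys.invoke(Ping)(())                         // CancelableFuture[ProverResult[Unit]]
      _   <- sys.dispose                                  // orderly shutdown
    } yield ()
  }
}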
/* * Copyright (c) 2002-2018 "Neo Technology," * Network Engine for Objects in Lund AB [http://neotechnology.com] * * This file is part of Neo4j. * * Neo4j is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.neo4j.cypher.internal.compiler.v2_3.ast.rewriters import org.neo4j.cypher.internal.compiler.v2_3._ import org.neo4j.cypher.internal.frontend.v2_3.Rewriter import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite class DeMorganRewriterTest extends CypherFunSuite with PredicateTestSupport { val rewriter: Rewriter = deMorganRewriter()(mock[AstRewritingMonitor]) test("not (P and Q) iff (not P) or (not Q)") { not(and(P, Q)) <=> or(not(P), not(Q)) } test("not (P or Q) iff (not P) and (not Q)") { not(or(P, Q)) <=> and(not(P), not(Q)) } test("P xor Q iff (P or Q) and (not P or not Q)") { xor(P, Q) <=> and(or(P, Q), or(not(P), not(Q))) } test("not (P xor Q) iff (not P and not Q) or (P and Q)") { not(xor(P, Q)) <=> or( and( not(P), not(Q)), and( not(not(P)), not(not(Q)))) } }
HuangLS/neo4j
community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/ast/rewriters/DeMorganRewriterTest.scala
Scala
apache-2.0
1,714
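The equivalences exercised by the rewriter tests above can be checked directly by brute force over the four Boolean assignments; this stand-alone snippet (no Neo4j/Cypher code involved) restates them at the value level.

object DeMorganCheck extends App {
  val cases = for (p <- Seq(true, false); q <- Seq(true, false)) yield (p, q)
  assert(cases.forall { case (p, q) => !(p && q) == (!p || !q) })              // not (P and Q) iff (not P) or (not Q)
  assert(cases.forall { case (p, q) => !(p || q) == (!p && !q) })              // not (P or Q) iff (not P) and (not Q)
  assert(cases.forall { case (p, q) => (p ^ q) == ((p || q) && (!p || !q)) })  // P xor Q iff (P or Q) and (not P or not Q)
  assert(cases.forall { case (p, q) => !(p ^ q) == ((!p && !q) || (p && q)) }) // not (P xor Q) iff (not P and not Q) or (P and Q)
  println("all four equivalences hold")
}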
/* * Copyright 2012 Comcast Cable Communications Management, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.comcast.money.core.handlers import com.comcast.money.api.SpanInfo import com.typesafe.config.Config object SpanLogFormatter { def apply(conf: Config) = { implicit val c: Config = conf new SpanLogFormatter( spanStart = configValue("formatting.span-start", "Span: "), nullValue = configValue("formatting.null-value", "NULL"), logTemplate = configValue("formatting.log-template", "[ %s=%s ]"), spanDurationMsEnabled = configEnabled("formatting.span-duration-ms-enabled"), formatIdsAsHex = configEnabled("formatting.format-ids-as-hex"), spanIdKey = configValue("formatting.keys.span-id", "span-id"), traceIdKey = configValue("formatting.keys.trace-id", "trace-id"), parentIdKey = configValue("formatting.keys.parent-id", "parent-id"), spanNameKey = configValue("formatting.keys.span-name", "span-name"), appNameKey = configValue("formatting.keys.app-name", "app-name"), startTimeKey = configValue("formatting.keys.start-time", "start-time"), spanDurationKey = configValue("formatting.keys.span-duration", "span-duration"), spanDurationMsKey = configValue("formatting.keys.span-duration-ms", "span-duration-ms"), spanSuccessKey = configValue("formatting.keys.span-success", "span-success")) } private def configValue(key: String, defaultValue: String)(implicit conf: Config) = if (conf.hasPath(key)) conf.getString(key) else defaultValue private def configEnabled(key: String)(implicit conf: Config): Boolean = if (conf.hasPath(key)) conf.getString(key).toBoolean else false } class SpanLogFormatter( val spanStart: String, val nullValue: String, val logTemplate: String, val spanDurationMsEnabled: Boolean, val formatIdsAsHex: Boolean, val spanIdKey: String, val traceIdKey: String, val parentIdKey: String, val spanNameKey: String, val appNameKey: String, val startTimeKey: String, val spanDurationKey: String, val spanDurationMsKey: String, val spanSuccessKey: String) { def buildMessage(spanInfo: SpanInfo): String = { implicit val builder = new StringBuilder() builder.append(spanStart) append(spanIdKey, if (formatIdsAsHex) spanInfo.id.selfIdAsHex else spanInfo.id.selfId) append(traceIdKey, if (formatIdsAsHex) spanInfo.id.traceIdAsHex else spanInfo.id.traceId) append(parentIdKey, if (formatIdsAsHex) spanInfo.id.parentIdAsHex else spanInfo.id.parentId) append(spanNameKey, spanInfo.name) append(appNameKey, spanInfo.appName) append(startTimeKey, spanInfo.startTimeMillis) append(spanDurationKey, spanInfo.durationMicros) if (spanDurationMsEnabled) append(spanDurationMsKey, spanInfo.durationMicros / 1000) append(spanSuccessKey, spanInfo.success) val notes = spanInfo.notes.values().iterator() while (notes.hasNext) { val note = notes.next append(note.name, valueOrNull(note.value)) } builder.toString() } private def append[T](key: String, value: T)(implicit builder: StringBuilder): StringBuilder = builder.append(logTemplate.format(key, value)) private def valueOrNull[T](value: T) = if (value == null) nullValue else value }
Comcast/money
money-core/src/main/scala/com/comcast/money/core/handlers/SpanLogFormatter.scala
Scala
apache-2.0
3,865
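A hedged construction example for the companion `apply` above: keys absent from the config fall back to the defaults, and overrides are read as strings. `buildMessage` needs a Money `SpanInfo`, which is outside the scope of this sketch, so only the configured fields are printed; the config paths are the ones the companion reads.

import com.typesafe.config.ConfigFactory

object SpanLogFormatterSketch {
  def main(args: Array[String]): Unit = {
    val conf = ConfigFactory.parseString(
      """
        |formatting.keys.span-id = "spanId"
        |formatting.span-duration-ms-enabled = "true"
      """.stripMargin)
    val formatter = SpanLogFormatter(conf)
    println(formatter.spanIdKey)              // "spanId" (overridden)
    println(formatter.traceIdKey)             // "trace-id" (default)
    println(formatter.spanDurationMsEnabled)  // true
    println(formatter.logTemplate)            // "[ %s=%s ]"
  }
}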
package io.hydrosphere.mist.worker.runners import java.nio.file.{Files, Paths} import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes} import akka.stream.scaladsl.Flow import org.apache.commons.codec.digest.DigestUtils import org.apache.commons.io.FileUtils import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSpecLike, Matchers} import scala.concurrent.Await import scala.concurrent.duration.{Duration, _} class ArtifactDownloaderSpec extends FunSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfter { val basePath = Paths.get("./target/artifacts") it("should create SimpleArtifactDownloader") { val downloader = ArtifactDownloader.create("localhost", 2004, 262144000, basePath) downloader shouldBe a[HttpArtifactDownloader] } describe("SimpleArtifactDownloader") { val routes = Flow[HttpRequest].map { request => val uri = request.uri.toString() if (uri.endsWith(".jar")) { HttpResponse(status = StatusCodes.OK, entity = "JAR CONTENT") } else if (uri.endsWith("/sha")) { val data = DigestUtils.sha1Hex("JAR CONTENT") HttpResponse(status = StatusCodes.OK, entity = data) } else { HttpResponse(status = StatusCodes.NotFound, entity = s"Not found ${request.uri}") } } before { val f = basePath.toFile FileUtils.deleteQuietly(f) FileUtils.forceMkdir(f) } it("should download file if it not found locally") { val fileContent = MockHttpServer.onServer(routes, binding => { val port = binding.localAddress.getPort val downloader = ArtifactDownloader.create("localhost", port, 262144000, basePath) val artifact = Await.result(downloader.downloadArtifact("test.jar"), Duration.Inf) new String(Files.readAllBytes(artifact.local.toPath)) }) Await.result(fileContent, Duration.Inf) shouldBe "JAR CONTENT" } it("should not download file if sha of local file and remote not equal") { val localFile = basePath.resolve("test.jar") Files.write(localFile, "DIFFERENT".getBytes()) val fileContent = MockHttpServer.onServer(routes, binding => { val port = binding.localAddress.getPort val downloader = ArtifactDownloader.create("localhost", port, 262144000, basePath) val file = Await.result(downloader.downloadArtifact("test.jar"), Duration.Inf) file }) Await.result(fileContent, Duration.Inf).local.lastModified() shouldBe localFile.toFile.lastModified() } it("should not download file if checksums are correct") { val localFile = basePath.resolve("test.jar") Files.write(localFile, "JAR CONTENT".getBytes()) val fileF = MockHttpServer.onServer(routes, binding => { val port = binding.localAddress.getPort val downloader = ArtifactDownloader.create("localhost", port, 262144000, basePath) val file = Await.result(downloader.downloadArtifact("test.jar"), Duration.Inf) file }) Await.result(fileF, Duration.Inf).local.lastModified() == localFile.toFile.lastModified() } it("should fail when local and remote file not found") { val routes = Flow[HttpRequest].map {request => { HttpResponse(status = StatusCodes.NotFound, entity = s"Not found ${request.uri}") }} val fileF = MockHttpServer.onServer(routes, binding => { val port = binding.localAddress.getPort val downloader = ArtifactDownloader.create("localhost", port, 262144000, basePath) val fileF = downloader.downloadArtifact("test.jar") Await.result(fileF, Duration.Inf) }) intercept[IllegalArgumentException] { Await.result(fileF, 30.seconds) } } } } object MockHttpServer { import akka.actor.ActorSystem import akka.stream.ActorMaterializer import akka.util.Timeout import scala.concurrent.duration._ import scala.concurrent.{Future, Promise} def 
onServer[A]( routes: Flow[HttpRequest, HttpResponse, _], f: (Http.ServerBinding) => A): Future[A] = { implicit val system = ActorSystem("mock-http-cli") implicit val materializer = ActorMaterializer() implicit val executionContext = system.dispatcher implicit val timeout = Timeout(1.seconds) val binding = Http().bindAndHandle(routes, "localhost", 0) val close = Promise[Http.ServerBinding] close.future .flatMap(binding => binding.unbind()) .onComplete(_ => { materializer.shutdown() Await.result(system.terminate(), Duration.Inf) }) val result = binding.flatMap(binding => { try { Future.successful(f(binding)) } catch { case e: Throwable => Future.failed(e) } finally { close.success(binding) } }) result } }
Hydrospheredata/mist
mist/worker/src/test/scala/io/hydrosphere/mist/worker/runners/ArtifactDownloaderSpec.scala
Scala
apache-2.0
4,853
package org.jetbrains.plugins.scala.traceLogger
package protocol

import SerializationApi.{ReadWriter => RW, _}
import upickle.implicits.key

import scala.annotation.nowarn

/*
final case class TraceLoggerMsg(msg: String,
                                values: Seq[ValueDesc],
                                stackTraceDiff: StackTraceDiff,
                                enclosed: Option[Enclosing])
 */

sealed abstract class TraceLoggerEntry

object TraceLoggerEntry {
  implicit val rw: RW[TraceLoggerEntry] = macroRW

  @key("msg")
  final case class Msg(msg: String, values: Seq[ValueDesc], stackTraceDiff: StackTraceDiff) extends TraceLoggerEntry

  @key("start")
  final case class Start(msg: String, values: Seq[ValueDesc], stackTraceDiff: StackTraceDiff) extends TraceLoggerEntry

  @key("succ")
  final case class Success(result: Data) extends TraceLoggerEntry

  @key("fail")
  final case class Fail(exception: String) extends TraceLoggerEntry

  object Msg { implicit val rw: RW[Msg] = macroRW }
  object Start { implicit val rw: RW[Start] = macroRW }
  object Success { @nowarn("msg=match may not be exhaustive") implicit val rw: RW[Success] = macroRW }
  object Fail { @nowarn("msg=match may not be exhaustive") implicit val rw: RW[Fail] = macroRW }
}

final case class StackTraceDiff(base: Int, additional: Seq[StackTraceEntry])

object StackTraceDiff {
  implicit val rw: RW[StackTraceDiff] = macroRW
}

final case class StackTraceEntry(method: String, className: String, line: Int, fileName: String) {
  def toStackTraceElement: StackTraceElement =
    new StackTraceElement(className, method, fileName, line)
}

object StackTraceEntry {
  def from(element: StackTraceElement): StackTraceEntry = {
    StackTraceEntry(element.getMethodName, element.getClassName, element.getLineNumber, element.getFileName)
  }

  // Serialize StackTraceEntry as a 4-tuple to have less json output
  implicit val rw: RW[StackTraceEntry] = SerializationApi
    .readwriter[(String, String, Int, String)]
    .bimap[StackTraceEntry](
      e => (e.method, e.className, e.line, e.fileName),
      t => StackTraceEntry(t._1, t._2, t._3, t._4),
    )
}
JetBrains/intellij-scala
scala/traceLogger/src/org/jetbrains/plugins/scala/traceLogger/protocol/Messages.scala
Scala
apache-2.0
2,184
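A hedged round-trip sketch for the tuple-based ReadWriter defined above. It assumes `SerializationApi` exposes upickle-style `write`/`read` entry points (as its name and the `macroRW`/`readwriter` usage suggest); if it exposes them differently, the same round trip applies with whatever serialization front end the project provides, given the implicit `rw` in scope.

object StackTraceEntrySketch {
  def main(args: Array[String]): Unit = {
    val entry = StackTraceEntry("invoke", "com.example.Foo", 42, "Foo.scala")
    // serialized as a plain 4-element JSON array thanks to the bimap above,
    // e.g. ["invoke","com.example.Foo",42,"Foo.scala"]
    val json = SerializationApi.write(entry)           // assumed upickle-style facade
    val back = SerializationApi.read[StackTraceEntry](json)
    assert(back == entry)
    println(json)
  }
}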
package spire package math import spire.math.ArbitrarySupport.{Positive, NonNegative} import scala.util.Try import org.scalatest.FunSuite import spire.implicits.{eqOps => _, _} import spire.laws.arb.{interval => interval_, rational} import spire.random.{Uniform, Dist} import org.scalatest.Matchers import org.scalacheck.Arbitrary._ import org.scalatest._ import prop._ import interval.Overlap._ import org.scalacheck._ import Gen._ import Arbitrary.arbitrary class IntervalTest extends FunSuite { def cc(n1: Double, n2: Double) = Interval.closed(n1, n2) def co(n1: Double, n2: Double) = Interval.openUpper(n1, n2) def oc(n1: Double, n2: Double) = Interval.openLower(n1, n2) def oo(n1: Double, n2: Double) = Interval.open(n1, n2) val e = Interval.empty[Double] val all = Interval.all[Double] test("[2, inf] is a superset of empty") { assert(Interval.atOrAbove(2).isSupersetOf(Interval.empty[Int])) } test("empty is empty") { assert(e.isEmpty) } test("point is point") { assert(Interval.point(2).isPoint) } test("[2,2] is point") { assert(Interval.closed(2, 2).isPoint) } test("[3,2] is empty") { assert(Interval.closed(3, 2).isEmpty) } test("empty interval is not above -1") { assert(!Interval.empty[Int].hasAbove(-1)) } test("empty interval is not below 1") { assert(!Interval.empty[Int].hasBelow(1)) } test("[2] has above 0") { assert(Interval.point(2).hasAbove(0)) } test("[-2] has below 0") { assert(Interval.point(-2).hasBelow(0)) } test("[0, 1] has at or above 1") { assert(Interval.closed(0, 1).hasAtOrAbove(1)) } test("[1, 2] has at or above 1") { assert(Interval.closed(1, 2).hasAtOrAbove(1)) } test("[1, 2] has above 1") { assert(Interval.closed(1, 2).hasAtOrAbove(1)) } test("(1, 2] has above 1") { assert(Interval.openLower(1, 2).hasAtOrAbove(1)) } test("Interval.point(2).toString == [2]") { assert(Interval.point(2).toString === "[2]") } test("Interval.empty.toString == (Ø)") { assert(Interval.empty[Int].toString === "(Ø)") } val a = cc(0.0, 4.0) test("a.contains(0.0) is true") { assert(a.contains(0.0) === true) } test("a.crosses(0.0) is false") { assert(a.crosses(0.0) === false) } test("a.contains(3.334) is true") { assert(a.contains(3.334) === true) } test("a.contains(8.334) is false") { assert(a.contains(8.334) === false) } val b = cc(-8.0, 2.0) test("b.contains(0.0) is true") { assert(b.contains(0.0) === true) } test("b.crosses(0.0) is true") { assert(b.crosses(0.0) === true) } val c = oc(0.0, 1.0) test("c.contains(0.0) is false") { assert(c.contains(0.0) === false) } test("c.crosses(0.0) is false") { assert(c.crosses(0.0) === false) } test("[3, 6] -- [3, 6] = nil") { assert(cc(3D, 6D) -- cc(3D, 6D) === Nil) } test("[3, 6] -- empty = [3, 6]") { assert(cc(3D, 6D) -- e === List(cc(3D, 6D))) } test("[3, 6] -- all = nil") { assert(cc(3D, 6D) -- all === Nil) } test("[3, 6] -- [4, 6] = [3, 4)") { assert(cc(3D, 6D) -- cc(4D, 6D) === List(co(3D, 4D))) } test("[3, 6] -- [4, 5] = [3, 4), (5, 6]") { assert(cc(3D, 6D) -- cc(4D, 5D) === List(co(3D, 4D), oc(5D, 6D))) } } class RingIntervalTest extends FunSuite { def cc(n1: Double, n2: Double) = Interval.closed(n1, n2) val a = cc(0.0, 4.0) test("a + a") { assert(a + a === cc(0.0, 8.0)) } test("a - a") { assert(a - a === cc(-4.0, 4.0)) } test("a * a") { assert(a * a === cc(0.0, 16.0)) } val b = cc(-8.0, 2.0) test("b + b") { assert(b + b === cc(-16.0, 4.0)) } test("b - b") { assert(b - b === cc(-10.0, 10.0)) } test("b * b") { assert(b * b === cc(-16.0, 64.0)) } import interval.{Open, Unbound, Closed} val c = 4.0 test("-(c, ∞) = (-∞, -c)") { assert( 
-Interval.fromBounds(Open(c), Unbound()) === Interval.fromBounds(Unbound(), Open(-c)) ) } test("-(-∞, c] = [-c, ∞)") { assert( -Interval.fromBounds(Unbound(), Closed(c)) === Interval.fromBounds(Closed(-c), Unbound()) ) } test("(c, ∞) * (-c) = (-∞, -c * c), c > 0") { assert( Interval.fromBounds(Open(c), Unbound()) * (-c) === Interval.fromBounds(Unbound(), Open(-c*c)) ) } test("(-∞, c] * (-c) = [-c * c, ∞), c > 0") { assert( Interval.fromBounds(Unbound(), Closed(c)) * (-c) === Interval.fromBounds(Closed(-c*c), Unbound()) ) } test("Interval multiplication bug #372") { val a = Interval(-1, 1) val b = Interval.above(1) val x = -1 val y = 10 assert(a.contains(x)) assert(b.contains(y)) assert((a*b).contains(x*y)) } test("Interval multiplication bug 1") { val a = Interval(-3, -2) val b = Interval.above(-10) val x = -3 val y = -9 assert(a.contains(x)) assert(b.contains(y)) assert((a*b).contains(x*y)) } test("Interval multiplication bug 2") { val a = Interval.atOrBelow(0) val b = Interval.below(-1) assert((a*b).contains(0)) } test("Interval multiplication bug 3") { val a = Interval.atOrBelow(0) val b = Interval.open(-2, -1) assert((a*b).contains(0)) } test("Interval multiplication bug 4") { val a = Interval.above(2) val b = Interval.closed(0, 1) assert((a*b).contains(0)) } } class IntervalGeometricPartialOrderTest extends FunSuite { import spire.optional.intervalGeometricPartialOrder._ import Interval.{openUpper, openLower, closed, open, point} test("[2, 3) === [2, 3)") { assert(openUpper(2, 3).partialCompare(openUpper(2, 3)) == 0.0) } test("[2, 3) < [3, 4]") { assert(openUpper(2, 3) < closed(3, 4)) } test("[2, 3] < (3, 4]") { assert(closed(2, 3) < openLower(3, 4)) } test("[2, 3] cannot be compared to [3, 4]") { assert(closed(2, 3).partialCompare(closed(3, 4)).isNaN) } test("[3, 4] > [2, 3)") { assert(closed(3, 4) > openUpper(2, 3)) } test("[2, 3) <= [3, 4]") { assert(openUpper(2, 3) <= closed(3, 4)) } test("[3, 4] >= [2, 3)") { assert(closed(3, 4) >= openUpper(2, 3)) } test("not [2, 3] < [3, 4]") { assert(!(closed(2, 3) < closed(3, 4))) } test("not [2, 3] <= [3, 4]") { assert(!(closed(2, 3) <= closed(3, 4))) } test("not [3, 4] > [3, 4]") { assert(!(closed(2, 3) > closed(3, 4))) } test("not [3, 4] >= [3, 4]") { assert(!(closed(2, 3) >= closed(3, 4))) } test("empty.partialCompare(empty) == 0.0") { assert(open(2, 2).partialCompare(open(3, 3)) == 0.0) } test("empty cannot be compared to [2, 3]") { assert(open(2, 2).partialCompare(closed(2, 3)).isNaN) } test("[2, 3] cannot be compared to empty") { assert(closed(2, 3).partialCompare(open(2, 2)).isNaN) } test("Minimal and maximal elements of {[1], [2, 3], [2, 4]}") { val intervals = Seq(point(1), closed(2, 3), closed(2, 4)) assert(intervals.pmin.toSet == Set(point(1))) assert(intervals.pmax.toSet == Set(closed(2, 3), closed(2, 4))) } } class IntervalSubsetPartialOrderTest extends FunSuite { import spire.optional.intervalSubsetPartialOrder._ import Interval.{openUpper, openLower, closed, open, point} test("Minimal and maximal elements of {[1, 3], [3], [2], [1]} by subset partial order") { val intervals = Seq(closed(1, 3), point(3), point(2), point(1)) assert(intervals.pmin.toSet == Set(point(1), point(2), point(3))) assert(intervals.pmax.toSet == Set(closed(1, 3))) } } // TODO: this is just the tip of the iceberg... we also need to worry about // unbounded intervals, closed vs open bounds, etc. 
class ContinuousIntervalTest extends FunSuite { def cc(n1: Double, n2: Double) = Interval.closed(n1, n2) val a = 2.0 val b = 5.0 val c = 1.0 val d = 4.0 // numerator interval crosses zero test("[-a,b] / [c,d]") { assert(cc(-a, b) / cc(c, d) === cc(-a / c, b / c)) } test("[-a,b] / [-d,-c]") { assert(cc(-a, b) / cc(-d, -c) === cc(b / -c, -a / -c)) } // numerator interval is positive test("[a,b] / [-d,-c]") { assert(cc(a, b) / cc(-d, -c) === cc(b / -c, a / -d)) } test("[a,b] / [c,d]") { assert(cc(a, b) / cc(c, d) === cc(a / d, b / c)) } // numerator interval is negative test("[-b,-a] / [-d,-c]") { assert(cc(-b, -a) / cc(-d, -c) === cc(-a / -d, -b / -c)) } test("[-b,-a] / [c,d]") { assert(cc(-b, -a) / cc(c, d) === cc(-b / c, -a / d)) } } class IntervalReciprocalTest extends FunSuite { def t(a: Interval[Rational], b: Interval[Rational]): Unit = test(s"[1]/$a = $b") { assert(a.reciprocal === b) } def error(a: Interval[Rational]): Unit = test(s"[1]/$a = error") { intercept[ArithmeticException] { a.reciprocal } } // point(x) t(Interval.point(r"1/5"), Interval.point(r"5")) t(Interval.point(r"-99"), Interval.point(r"-1/99")) error(Interval.point(r"0")) // above(x) t(Interval.above(r"3"), Interval.open(r"0", r"1/3")) t(Interval.above(r"0"), Interval.above(r"0")) //fixme error(Interval.above(r"-1")) // atOrAbove(x) t(Interval.atOrAbove(r"1/9"), Interval.openLower(r"0", r"9")) error(Interval.atOrAbove(r"0")) error(Interval.atOrAbove(r"-2")) // closed(x, y) t(Interval.closed(r"1/2", r"4"), Interval.closed(r"1/4", r"2")) error(Interval.closed(r"0", r"6")) error(Interval.closed(r"-2", r"1/5")) error(Interval.closed(r"-1/9", r"0")) t(Interval.closed(r"-70", r"-14"), Interval.closed(r"-1/14", r"-1/70")) // openLower(x, y) t(Interval.openLower(r"1/2", r"4"), Interval.openUpper(r"1/4", r"2")) t(Interval.openLower(r"0", r"6"), Interval.atOrAbove(r"1/6")) //fixme error(Interval.openLower(r"-2", r"1/5")) error(Interval.openLower(r"-1/9", r"0")) t(Interval.openLower(r"-70", r"-14"), Interval.openUpper(r"-1/14", r"-1/70")) // openUpper(x, y) t(Interval.openUpper(r"1/2", r"4"), Interval.openLower(r"1/4", r"2")) error(Interval.openUpper(r"0", r"6")) error(Interval.openUpper(r"-2", r"1/5")) t(Interval.openUpper(r"-1/9", r"0"), Interval.atOrBelow(r"-9")) //fixme t(Interval.openUpper(r"-70", r"-14"), Interval.openLower(r"-1/14", r"-1/70")) // open t(Interval.open(r"1/2", r"4"), Interval.open(r"1/4", r"2")) t(Interval.open(r"0", r"6"), Interval.above(r"1/6")) //fixme error(Interval.open(r"-2", r"1/5")) t(Interval.open(r"-1/9", r"0"), Interval.below(r"-9")) //fixme t(Interval.open(r"-70", r"-14"), Interval.open(r"-1/14", r"-1/70")) // below(x) error(Interval.below(r"3")) t(Interval.below(r"0"), Interval.below(r"0")) //fixme t(Interval.below(r"-1"), Interval.open(r"-1", r"0")) //fixme // atOrBelow(x) error(Interval.atOrBelow(r"1/9")) error(Interval.atOrBelow(r"0")) t(Interval.atOrBelow(r"-2"), Interval.openUpper(r"-1/2", r"0")) //fixme } class IntervalCheck extends PropSpec with Matchers with GeneratorDrivenPropertyChecks { property("x ⊆ x") { forAll { (x: Interval[Rational]) => (x isSupersetOf x) shouldBe true } } property("x ⊆ (x | y) && y ⊆ (x | y)") { forAll { (x: Interval[Rational], y: Interval[Rational]) => val z = x | y (z isSupersetOf x) shouldBe true (z isSupersetOf y) shouldBe true } } property("(x & y) ⊆ x && (x & y) ⊆ y") { forAll { (x: Interval[Rational], y: Interval[Rational]) => val z = x & y (x isSupersetOf z) shouldBe true (y isSupersetOf z) shouldBe true } } val rng = spire.random.GlobalRng property("(x 
-- y) ⊆ x && (x -- y) & y = Ø") { forAll { (x: Interval[Rational], y: Interval[Rational]) => (x -- y).foreach { zi => (zi isSubsetOf x) shouldBe true (zi intersects y) shouldBe false } } } property("(x -- Ø) = x") { forAll { (x: Interval[Rational]) => if (x.nonEmpty) { (x -- Interval.empty[Rational]) shouldBe List(x) } } } property("(x -- x) = Ø") { forAll { (x: Interval[Rational]) => (x -- x) shouldBe Nil } } property("(x -- (-∞, ∞)) = Ø") { forAll { (x: Interval[Rational]) => (x -- Interval.all[Rational]) shouldBe Nil } } def sample(int: Interval[Rational], n: Int): Array[Rational] = if (int.isEmpty) { Array.empty[Rational] } else { import spire.math.interval.ValueBound val underlyingf: () => Rational = (int.lowerBound, int.upperBound) match { case (ValueBound(x) , ValueBound(y)) => () => rng.nextInt(10) match { case 0 => x case 9 => y case _ => x + Rational(rng.nextDouble) * (y - x) } case (ValueBound(x) , _) => () => rng.nextInt(5) match { case 0 => x case _ => x + (Rational(rng.nextGaussian).abs * Long.MaxValue) } case (_, ValueBound(y)) => () => rng.nextInt(5) match { case 4 => y case _ => y - (Rational(rng.nextGaussian).abs * Long.MaxValue) } case (_ , _) => () => Rational(rng.nextGaussian) * Long.MaxValue } def nextf(): Rational = { val r = underlyingf() if (int.contains(r)) r else nextf() } Array.fill(n)(nextf()) } val tries = 100 def testUnop(f: Interval[Rational] => Interval[Rational])(g: Rational => Rational): Unit = { forAll { (a: Interval[Rational]) => val c: Interval[Rational] = f(a) sample(a, tries).foreach { x => val ok = c.contains(g(x)) if (!ok) println("%s failed on %s" format (a, x.toString)) ok shouldBe true } } } def testBinop(f: (Interval[Rational], Interval[Rational]) => Interval[Rational])(g: (Rational, Rational) => Rational): Unit = { forAll { (a: Interval[Rational], b: Interval[Rational]) => val c: Interval[Rational] = f(a, b) sample(a, tries).zip(sample(b, tries)).foreach { case (x, y) => if (!a.contains(x)) println("%s does not contain %s" format (a, x)) if (!b.contains(y)) println("%s does not contain %s" format (b, y)) val ok = c.contains(g(x, y)) if (!ok) println("(%s, %s) failed on (%s, %s)" format (a, b, x.toString, y.toString)) ok shouldBe true } } } property("sampled unop abs") { testUnop(_.abs)(_.abs) } property("sampled unop -") { testUnop(-_)(-_) } property("sampled unop pow(2)") { testUnop(_.pow(2))(_.pow(2)) } property("sampled unop pow(3)") { testUnop(_.pow(3))(_.pow(3)) } property("sampled binop +") { testBinop(_ + _)(_ + _) } property("sampled binop -") { testBinop(_ - _)(_ - _) } property("sampled binop *") { testBinop(_ * _)(_ * _) } property("sampled binop vmin") { testBinop(_ vmin _)(_ min _) } property("sampled binop vmax") { testBinop(_ vmax _)(_ max _) } property("toString/apply") { forAll { (x: Interval[Rational]) => Interval(x.toString) shouldBe x } } property("points compare as scalars") { import spire.optional.intervalGeometricPartialOrder._ import spire.algebra.{Order, PartialOrder} forAll { (x: Rational, y: Rational) => val a = Interval.point(x) val b = Interval.point(y) PartialOrder[Interval[Rational]].tryCompare(a, b).get shouldBe Order[Rational].compare(x, y) val Some(Point(vmin)) = a.pmin(b) vmin shouldBe x.min(y) val Some(Point(vmax)) = a.pmax(b) vmax shouldBe x.max(y) } } property("(-inf, a] < [b, inf) if a < b") { import spire.optional.intervalGeometricPartialOrder._ import spire.algebra.{Order, PartialOrder} forAll { (a: Rational, w: Positive[Rational]) => val b = a + w.num // a < b val i = Interval.atOrBelow(a) val j = 
Interval.atOrAbove(b) (i < j) shouldBe true (i >= j) shouldBe false (j > i) shouldBe true (j <= i) shouldBe false } } property("(-inf, a] does not compare to [b, inf) if a >= b") { import spire.optional.intervalGeometricPartialOrder._ import spire.algebra.{Order, PartialOrder} forAll { (a: Rational, w: NonNegative[Rational]) => val b = a - w.num // a >= b val i = Interval.atOrBelow(a) val j = Interval.atOrAbove(b) i.partialCompare(j).isNaN shouldBe true j.partialCompare(i).isNaN shouldBe true } } property("(-inf, inf) does not compare with [a, b]") { import spire.optional.intervalGeometricPartialOrder._ import spire.algebra.{Order, PartialOrder} forAll { (a: Rational, b: Rational) => val i = Interval.all[Rational] val j = Interval.closed(a, b) i.partialCompare(j).isNaN shouldBe true j.partialCompare(i).isNaN shouldBe true } } property("empty intervals are equal") { forAll { (x: Rational, y: Rational) => import spire.algebra.Eq val a = Interval.open(x, x) val b = Interval.open(y, y) val c = Interval.openUpper(x, x) val d = Interval.openLower(x, x) val e = Interval.empty[Rational] a shouldBe e a shouldBe b b shouldBe e c shouldBe e d shouldBe e e shouldBe e Eq[Interval[Rational]].eqv(a, e) shouldBe true Eq[Interval[Rational]].eqv(a, b) shouldBe true Eq[Interval[Rational]].eqv(b, e) shouldBe true Eq[Interval[Rational]].eqv(c, e) shouldBe true Eq[Interval[Rational]].eqv(d, e) shouldBe true Eq[Interval[Rational]].eqv(e, e) shouldBe true } } } class IntervalIteratorCheck extends PropSpec with Matchers with GeneratorDrivenPropertyChecks { property("bounded intervals are ok") { forAll { (n1: Rational, n2: Rational, num0: Byte) => val (x, y) = if (n1 <= n2) (n1, n2) else (n2, n1) val num = ((num0 & 255) % 13) + 1 def testEndpoints(interval: Interval[Rational], step: Rational, hasLower: Boolean, hasUpper: Boolean): Unit = { val ns = interval.iterator(step).toSet ns(x) shouldBe hasLower ns(y) shouldBe hasUpper val extra = if (hasLower && hasUpper) 2 else if (hasLower || hasUpper) 1 else 0 ns.size shouldBe (num - 1 + extra) } val cc = Interval.closed(x, y) // [x, y] val oo = Interval.open(x, y) // (x, y) val oc = Interval.openLower(x, y) // (x, y] val co = Interval.openUpper(x, y) // [x, y) val step = (y - x) / num if (step.isZero) { List(cc, oo, oc, co).foreach { xs => Try(xs.iterator(0)).isFailure shouldBe true } } else { val triples = List((cc, true, true), (oo, false, false), (oc, false, true), (co, true, false)) triples.foreach { case (interval, hasLower, hasUpper) => testEndpoints(interval, step, hasLower, hasUpper) testEndpoints(interval, -step, hasLower, hasUpper) } } } } property("half-unbound intervals are ok") { forAll { (n: Rational, s: Rational) => val step0 = s.abs val cu = Interval.atOrAbove(n) // [n, ∞) val ou = Interval.above(n) // (n, ∞) val uc = Interval.atOrBelow(n) // (-∞, n] val uo = Interval.below(n) // (-∞, n) if (step0.isZero) { List(cu, ou, uc, uo).foreach { xs => Try(xs.iterator(0)).isFailure shouldBe true } } else { val triples = List((cu, true, 1), (ou, false, 1), (uc, true, -1), (uo, false, -1)) triples.foreach { case (interval, hasN, mult) => val step = step0 * mult val it = interval.iterator(step) val expected = if (hasN) n else n + step it.next() shouldBe expected Try(interval.iterator(-step)).isFailure shouldBe true } } } } property("unbound intervals are not supported") { forAll { (step: Rational) => Try(Interval.all[Rational].iterator(step)).isFailure shouldBe true } } } class IntervalOverlapCheck extends PropSpec with Matchers with GeneratorDrivenPropertyChecks { 
property("(x overlap y) = (y overlap x)") { forAll() { (x: Interval[Rational], y: Interval[Rational]) => x.overlap(y) shouldBe y.overlap(x) } } property("x overlap x = Equal(x, x)") { forAll() { x: Interval[Rational] => x.overlap(x) shouldBe Equal[Rational]() } } property("(x overlap Ø) = Subset(Ø, x) id x != Ø") { forAll() { x: Interval[Rational] => whenever(x.nonEmpty) { val empty = Interval.empty[Rational] x.overlap(empty) shouldBe Subset(empty, x) } } } property("consistency with Interval#isSubset") { forAll() { (x: Interval[Rational], y: Interval[Rational]) => x.overlap(y).isSubset shouldBe (x.isSubsetOf(y) || y.isSubsetOf(x)) } } property("(-inf, a] overlap [a, +inf) = PartialOverlap") { forAll() { (x: Rational) => Interval.atOrBelow(x).overlap(Interval.atOrAbove(x)) shouldBe a[PartialOverlap[_]] } } property("[a, c) overlap (b, d] = PartialOverlap if a < b < c < d") { forAll() { (x: Rational, y: Rational, m: Rational, n: Rational) => import spire.algebra.Order.ordering val sorted = List(x, y, m, n).sorted whenever(sorted.distinct == sorted) { Interval.openUpper(sorted(0), sorted(2)).overlap(Interval.openLower(sorted(1), sorted(3))) shouldBe a[PartialOverlap[_]] } } } property("[a, c] overlap [b, d] = PartialOverlap if a < b <= c < d") { forAll() { (x: Rational, y: Rational, m: Rational, n: Rational) => import spire.algebra.Order.ordering val sorted = List(x, y, m, n).sorted whenever(sorted.distinct.size >= 3 && sorted(0) != sorted(1) && sorted(2) != sorted(3)) { Interval.closed(sorted(0), sorted(2)).overlap(Interval.closed(sorted(1), sorted(3))) shouldBe a[PartialOverlap[_]] } } } property("(-inf, a) overlap (b, +inf) = PartialOverlap if a > b") { forAll() { (x: Rational, y: Rational) => whenever(x != y) { Interval.below(max(x, y)).overlap(Interval.above(min(x, y))) shouldBe a[PartialOverlap[_]] } } } property("(-inf, a) overlap (b, +inf) = Disjoint if a <= b") { forAll() { (x: Rational, y: Rational) => Interval.below(min(x, y)).overlap(Interval.above(max(x, y))).isDisjoint shouldBe true } } property("Disjoint((-inf, a), (b, +inf)).join = [a, b]") { forAll() { (x: Rational, y: Rational) => val l = min(x, y) val u = max(x, y) Disjoint(Interval.below(l), Interval.above(u)).join shouldBe Interval.closed(l, u) } } property("[a, b) overlap (c, d] = Disjoint if a < b <= c < d") { forAll() { (x: Rational, y: Rational, m: Rational, n: Rational) => import spire.algebra.Order.ordering val sorted = List(x, y, m, n).sorted whenever(sorted(0) < sorted(1) && sorted(2) < sorted(3)) { val overlap = Interval.openUpper(sorted(0), sorted(1)).overlap(Interval.openLower(sorted(2), sorted(3))) overlap.isDisjoint shouldBe true overlap.asInstanceOf[Disjoint[Rational]].join shouldBe Interval.closed(sorted(1), sorted(2)) } } } property("[a, b] overlap [c, d] = Disjoint if a <= b < c <= d") { forAll() { (x: Rational, y: Rational, m: Rational, n: Rational) => import spire.algebra.Order.ordering val sorted = List(x, y, m, n).sorted whenever(sorted(1) < sorted(2)) { val overlap = Interval.closed(sorted(0), sorted(1)).overlap(Interval.closed(sorted(2), sorted(3))) overlap.isDisjoint shouldBe true overlap.asInstanceOf[Disjoint[Rational]].join shouldBe Interval.open(sorted(1), sorted(2)) } } } property("x overlap [a] is never a PartialOverlap") { forAll() { (x: Interval[Rational], b: Rational) => x.overlap(Interval.point(b)) should not be a[PartialOverlap[_]] } } }
tixxit/spire
tests/src/test/scala/spire/math/IntervalTest.scala
Scala
mit
23,308
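A compact usage sketch of the interval algebra the suites above exercise, using only the public spire API (spire must be on the classpath); the printed values follow from the bound definitions.

import spire.math.{Interval, Rational}
import spire.implicits._

object IntervalSketch extends App {
  val a = Interval.closed(Rational(0), Rational(4))   // [0, 4]
  val b = Interval.open(Rational(2), Rational(6))     // (2, 6)
  println(a & b)                    // (2, 4]  -- intersection
  println(a | b)                    // [0, 6)  -- union / hull
  println(a + b)                    // (2, 10) -- ring operation, as in RingIntervalTest
  println(a -- b)                   // List([0, 2]) -- difference, as in IntervalTest
  println(a.contains(Rational(3)))  // true
}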
package com.rocketfuel.sdbc.cassandra import com.rocketfuel.sdbc.Cassandra._ import com.rocketfuel.sdbc.Cassandra.syntax._ import org.scalatest.FunSuite class syntaxSpec extends FunSuite { test("syntax works") { implicit def x: Queryable[Int, Int] = ??? implicit def session: Session = ??? assertCompiles("""3.iterator()""") } }
rocketfuel/sdbc
cassandra/src/test/scala/com/rocketfuel/sdbc/cassandra/syntaxSpec.scala
Scala
bsd-3-clause
350
object OhNoes { sealed trait F sealed abstract class FA extends F sealed abstract class FB extends F case object FA1 extends FA case object FB1 extends FB case object FB2 extends FB sealed trait G case object G1 extends G case object G2 extends G sealed trait H case class H1(a: FB, b: G) extends H case class H2(b: F) extends H val demo: H => Unit = { case H1(FB1, G1) => case H1(FB2, G2) => case H2(_: FB) => case H2(_: FA) => case H1(FB1, G2) => case H1(FB2, G1) => } val demo2: H => Unit = { case H2(_: FA) => case H2(_: FB) => case H1(FB1, G1) => case H1(FB2, G1) => case H1(FB1, G2) => case H1(FB2, G2) => } }
som-snytt/dotty
tests/patmat/t9411b.scala
Scala
apache-2.0
709
/* * Copyright (C) 2015 Stratio (http://stratio.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.stratio.sparta.plugin.parser.datetime import com.stratio.sparta.plugin.parser.datetime.DateTimeParser import org.apache.spark.sql.Row import org.apache.spark.sql.types.{StringType, StructField, StructType} import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner import org.scalatest.{Matchers, WordSpecLike} @RunWith(classOf[JUnitRunner]) class DateTimeParserTest extends WordSpecLike with Matchers { val inputField = "ts" val outputsFields = Seq("ts") //scalastyle:off "A DateTimeParser" should { "parse unixMillis to string" in { val input = Row(1416330788000L) val schema = StructType(Seq(StructField("ts", StringType))) val result = new DateTimeParser(1, inputField, outputsFields, schema, Map("inputFormat" -> "unixMillis")) .parse(input, false) val expected = Row(1416330788000L, "1416330788000") assertResult(result)(expected) } "parse unix to string" in { val input = Row(1416330788) val schema = StructType(Seq(StructField("ts", StringType))) val result = new DateTimeParser(1, inputField, outputsFields, schema, Map("inputFormat" -> "unix")) .parse(input, false) val expected = Row(1416330788, "1416330788000") assertResult(result)(expected) } "parse unix to string removing raw" in { val input = Row(1416330788) val schema = StructType(Seq(StructField("ts", StringType))) val result = new DateTimeParser(1, inputField, outputsFields, schema, Map("inputFormat" -> "unix")) .parse(input, true) val expected = Row("1416330788000") assertResult(result)(expected) } "not parse anything if the field does not match" in { val input = Row("1212") val schema = StructType(Seq(StructField("otherField", StringType))) an[IllegalStateException] should be thrownBy new DateTimeParser(1, inputField, outputsFields, schema, Map("inputFormat" -> "unixMillis")).parse(input, false) } "not parse anything and generate a new Date" in { val input = Row("anything") val schema = StructType(Seq(StructField("ts", StringType))) val result = new DateTimeParser(1, inputField, outputsFields, schema, Map("inputFormat" -> "autoGenerated")) .parse(input, false) assertResult(result.size)(2) } "not parse if inputFormat does not exist" in { val input = Row("1416330788") val schema = StructType(Seq(StructField("ts", StringType))) val result = new DateTimeParser(1, inputField, outputsFields, schema, Map()).parse(input, false) val expected = Row("1416330788", "1416330788") assertResult(result)(expected) } "parse dateTime in hive format" in { val input = Row("2015-11-08 15:58:58") val schema = StructType(Seq(StructField("ts", StringType))) val result = new DateTimeParser(1, inputField, outputsFields, schema, Map("inputFormat" -> "hive")) .parse(input, false) val expected = Row("2015-11-08 15:58:58", "1446998338000") assertResult(result)(expected) } } }
danielcsant/sparta
plugins/src/test/scala/com/stratio/sparta/plugin/parser/datetime/DateTimeParserTest.scala
Scala
apache-2.0
3,775
import sbt._ import Keys._ object ExtraProjectPluginExample2 extends AutoPlugin { // Enable this plugin by default override def requires: Plugins = sbt.plugins.CorePlugin override def trigger = allRequirements override def derivedProjects(proj: ProjectDefinition[_]): Seq[Project] = // Make sure to exclude project extras to avoid recursive generation if (proj.projectOrigin != ProjectOrigin.DerivedProject) { val id = proj.id + "1" Seq( Project(id, file(id)). enablePlugins(DatabasePlugin) ) } else Nil }
som-snytt/xsbt
sbt/src/sbt-test/project/extra-projects/project/ExtraProjectPluginExample2.scala
Scala
bsd-3-clause
568
package com.nulabinc.backlog.r2b.redmine.service import javax.inject.Inject import com.nulabinc.backlog.migration.common.utils.Logging import com.nulabinc.backlog.r2b.redmine.conf.RedmineApiConfiguration import com.taskadapter.redmineapi.RedmineManager import com.taskadapter.redmineapi.bean.News import scala.jdk.CollectionConverters._ /** * @author uchida */ class NewsServiceImpl @Inject() ( apiConfig: RedmineApiConfiguration, redmine: RedmineManager ) extends NewsService with Logging { override def allNews(): Seq[News] = try { redmine.getProjectManager.getNews(apiConfig.projectKey).asScala.toSeq } catch { case e: Throwable => logger.warn(e.getMessage, e) Seq.empty[News] } }
nulab/BacklogMigration-Redmine
redmine/src/main/scala/com/nulabinc/backlog/r2b/redmine/service/NewsServiceImpl.scala
Scala
mit
748
package org.excala.tests import org.scalatest.Tag /** * Contains all of the tags for tests that we'll use. */ trait ExpectTags { object TimedTest extends Tag("org.excala.TimedTest") }
edmundnoble/Excala
src/org/excala/tests/ExpectTags.scala
Scala
bsd-3-clause
190
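A small sketch of how a suite would use the `TimedTest` tag defined above, so timed tests can be excluded or selected by the runner via the tag name `org.excala.TimedTest`. The suite and test names are invented.

import org.scalatest.FunSuite

class TimedBehaviourSuite extends FunSuite with ExpectTags {
  // tagged test: can be skipped with `-l org.excala.TimedTest` or selected with `-n`
  test("a slow expect completes before its timeout", TimedTest) {
    assert(1 + 1 == 2) // placeholder body
  }

  test("an untagged test always runs") {
    assert("excala".nonEmpty)
  }
}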
package org.jetbrains.plugins.scala.lang.psi.controlFlow.impl import com.intellij.psi.PsiNamedElement import org.jetbrains.plugins.scala.lang.psi.controlFlow.ScControlFlowPolicy /** * Nikolay.Tropin * 2014-04-14 */ object AllVariablesControlFlowPolicy extends ScControlFlowPolicy { override def isElementAccepted(named: PsiNamedElement): Boolean = true }
gtache/intellij-lsp
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/controlFlow/impl/AllVariablesControlFlowPolicy.scala
Scala
apache-2.0
362
package org.powlab.jeye.utils

import org.powlab.jeye.core.Opcodes._
import org.powlab.jeye.decode.RuntimeOpcode
import org.powlab.jeye.decode.RuntimeOpcodes._
import org.powlab.jeye.decode.graph.OpcodeNodes._
import org.powlab.jeye.decode.graph.OpcodeTree
import scala.collection.mutable.Map
import org.powlab.jeye.decode.LocalVariable
import org.powlab.jeye.decode.graph.OpcodeNode

/**
 * Utilities for printing entities.
 */
object PrintUtils {

  def opcodeToString(opcode: OpCode) = opcode.name + " // " + opcode.operation + " (code " + opcode.code + " hex " + opcode.hex + ")"

  def printVariables(localVariables: Array[LocalVariable]) {
    localVariables.filter(_ != null).foreach(localVariable => println(localVariable.name.view(localVariable)))
  }

  def printRuntimeOpcodes(runtimeOpcodes: Array[RuntimeOpcode], cpUtils: ConstantPoolUtils, numberPad: Int = 2) {
    runtimeOpcodes.foreach(runtimeOpcode => println(runtimeOpcodeToString(runtimeOpcode, cpUtils, numberPad)))
  }

  /**
   * TODO here: use a marker
   */
  def printGraph(tree: OpcodeTree) {
    val nodeToState = Map[String, Boolean]()
    def print(node: OpcodeNode, indent: String) {
      var current = node
      while (current != null) {
        val number = current.id;
        if (!nodeToState.getOrElse(number, false)) {
          nodeToState.put(number, true)
        } else {
          return
        }
        println(indent + current)
        if (current.branchy) {
          tree.nexts(current).foreach(node => {
            println(indent + " ---- new block");
            print(node, indent + " ")
          })
          return
        } else {
          current = tree.next(current)
        }
      }
    }
    print(tree.head, "");
  }
}
powlab/jeye
src/main/scala/org/powlab/jeye/utils/PrintUtils.scala
Scala
apache-2.0
1,773
/* * Copyright 2011-2022 GatlingCorp (https://gatling.io) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.gatling.http.feeder import java.io.InputStream import java.nio.charset.Charset import scala.jdk.CollectionConverters._ import scala.util.Using import io.gatling.core.check.xpath.XmlParsers import io.gatling.core.feeder.Record import io.gatling.core.util.Resource import net.sf.saxon.s9api.XdmNodeKind /** * Parser for files in [[http://www.sitemaps.org/protocol.html sitemap]] format. */ object SitemapParser { val LocationTag = "loc" /** * Parse file in sitemap format. Returns a Record for each location described * in a sitemap file. * * @param resource resource to parse * @return a record for each url described in a sitemap file */ def parse(resource: Resource, charset: Charset): IndexedSeq[Record[String]] = Using.resource(resource.inputStream) { stream: InputStream => parse(stream, charset) } /** * Parse a file in sitemap format. Returns a Record for each location described * in a sitemap file. * * @param inputStream stream for the file to parse * @return a record for each url described in a sitemap file */ private[feeder] def parse(inputStream: InputStream, charset: Charset): IndexedSeq[Record[String]] = { val root = XmlParsers.parse(inputStream, charset) (for { urlset <- root.children("urlset").asScala if urlset.getNodeKind == XdmNodeKind.ELEMENT url <- urlset.children("url").asScala if urlset.getNodeKind == XdmNodeKind.ELEMENT } yield { val urlChildren = url.children.asScala.toVector val record = urlChildren.collect { case child if child.getNodeKind == XdmNodeKind.ELEMENT => child.getNodeName.getLocalName -> child.getStringValue }.toMap if (!record.contains(LocationTag) || record(LocationTag).isEmpty) { throw new SitemapFormatException("No 'loc' child in 'url' element") } record }).toVector } } class SitemapFormatException(msg: String) extends Exception
gatling/gatling
gatling-http/src/main/scala/io/gatling/http/feeder/SitemapParser.scala
Scala
apache-2.0
2,582
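A hedged sketch of what the parser above expects and produces, run against a minimal in-memory sitemap. The `InputStream` overload is `private[feeder]`, so a snippet like this would have to live in (or be tested from) `io.gatling.http.feeder`; `Record[String]` is Gatling's string-keyed map of the `<url>` children.

import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets.UTF_8

object SitemapSketch {
  def main(args: Array[String]): Unit = {
    val xml =
      """<?xml version="1.0" encoding="UTF-8"?>
        |<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
        |  <url>
        |    <loc>http://www.example.com/</loc>
        |    <lastmod>2005-01-01</lastmod>
        |  </url>
        |</urlset>""".stripMargin
    val records = SitemapParser.parse(new ByteArrayInputStream(xml.getBytes(UTF_8)), UTF_8)
    // one Record per <url>, keyed by child element name; "loc" is mandatory
    records.foreach(r => println(r("loc")))
  }
}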
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package iht.controllers.application.assets.insurancePolicy import iht.config.AppConfig import iht.connector.{CachingConnector, IhtConnector} import iht.controllers.application.EstateController import iht.forms.ApplicationForms._ import iht.metrics.IhtMetrics import iht.models.application.ApplicationDetails import iht.models.application.assets._ import iht.utils.ApplicationKickOutHelper import iht.views.html.application.asset.insurancePolicy.insurance_policy_details_more_than_max_value import javax.inject.Inject import play.api.mvc.MessagesControllerComponents import uk.gov.hmrc.auth.core.AuthConnector import uk.gov.hmrc.auth.core.retrieve.v2.Retrievals.{nino => ninoRetrieval} import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController class InsurancePolicyDetailsMoreThanMaxValueControllerImpl @Inject()(val metrics: IhtMetrics, val ihtConnector: IhtConnector, val cachingConnector: CachingConnector, val authConnector: AuthConnector, val insurancePolicyDetailsMoreThanMaxValueView: insurance_policy_details_more_than_max_value, implicit val appConfig: AppConfig, val cc: MessagesControllerComponents) extends FrontendController(cc) with InsurancePolicyDetailsMoreThanMaxValueController { } trait InsurancePolicyDetailsMoreThanMaxValueController extends EstateController { override val applicationSection = Some(ApplicationKickOutHelper.ApplicationSectionAssetsInsurancePoliciesMoreThanMax) val insurancePolicyDetailsMoreThanMaxValueView: insurance_policy_details_more_than_max_value def onPageLoad = authorisedForIhtWithRetrievals(ninoRetrieval) { userNino => implicit request => { estateElementOnPageLoad[InsurancePolicy](insurancePolicyMoreThanMaxForm, insurancePolicyDetailsMoreThanMaxValueView.apply, _.allAssets.flatMap(_.insurancePolicy), userNino) } } def onSubmit = authorisedForIhtWithRetrievals(ninoRetrieval) { userNino => implicit request => { val updateApplicationDetails: (ApplicationDetails, Option[String], InsurancePolicy) => (ApplicationDetails, Option[String]) = (appDetails, _, insurancePolicy) => { val updatedAD = appDetails.copy(allAssets = Some(appDetails.allAssets.fold (new AllAssets(action = None, insurancePolicy = Some(insurancePolicy)))(allAssets => updateAllAssetsWithInsurancePolicy(allAssets, insurancePolicy, identity)) )) (updatedAD, None) } estateElementOnSubmitConditionalRedirect[InsurancePolicy](insurancePolicyMoreThanMaxForm, insurancePolicyDetailsMoreThanMaxValueView.apply, updateApplicationDetails, (ad, _) => ad.allAssets.flatMap(allAssets => allAssets.insurancePolicy).flatMap(_.moreThanMaxValue) .fold(insurancePoliciesRedirectLocation)(_ => iht.controllers.application.assets.insurancePolicy.routes.InsurancePolicyDetailsAnnuityController.onPageLoad()), userNino) } } }
hmrc/iht-frontend
app/iht/controllers/application/assets/insurancePolicy/InsurancePolicyDetailsMoreThanMaxValueController.scala
Scala
apache-2.0
3,876
package timetrace.math

import timetrace.math.Vector4.Normalized
import timetrace.math.Vector4.SpatiallyNormalized
import scala.math.{ min, max }

object Vector4 {
  class Normalized(x: Double, y: Double, z: Double, t: Double) extends Vector4(x, y, z, t) {
    assume(Math.abs(magnitude - 1.0) < 1e-6)

    override def normalize = this

    override def isNormalized() = true
  }

  // Normalized (x, y, z) part, ensure t is +/- 1.0.
  // Used mainly to describe ray directions
  class SpatiallyNormalized(v3: Vector3.Normalized, t: Double) extends Vector4(v3.x, v3.y, v3.z, t) {
    assume(t == -1.0 || t == 1.0)

    override def spatiallyNormalize = this
  }

  def componentMinimums(v1: Vector4, v2: Vector4): Vector4 = {
    return new Vector4( //
      min(v1.x, v2.x), //
      min(v1.y, v2.y), //
      min(v1.z, v2.z), //
      min(v1.t, v2.t))
  }

  def componentMaximums(v1: Vector4, v2: Vector4): Vector4 = {
    return new Vector4( //
      max(v1.x, v2.x), //
      max(v1.y, v2.y), //
      max(v1.z, v2.z), //
      max(v1.t, v2.t))
  }

  def clamp(mins: Vector4, maxs: Vector4)(v: Vector4): Vector4 = {
    assume(mins.x <= maxs.x)
    assume(mins.y <= maxs.y)
    assume(mins.z <= maxs.z)
    assume(mins.t <= maxs.t)

    return new Vector4( //
      max(mins.x, min(maxs.x, v.x)), //
      max(mins.y, min(maxs.y, v.y)), //
      max(mins.z, min(maxs.z, v.z)), //
      max(mins.t, min(maxs.t, v.t)))
  }
}

sealed case class Vector4(val x: Double, val y: Double, val z: Double, val t: Double) extends VectorN[Vector4] {
  override def toString(): String = s"[$x, $y, $z, $t]"

  def truncateTo3(): Vector3 = new Vector3(x, y, z)

  def dot(that: Vector4): Double = (
    this.x * that.x +
    this.y * that.y +
    this.z * that.z +
    this.t * that.t)

  def *(s: Double): Vector4 = Vector4(x * s, y * s, z * s, t * s)

  def +(that: Vector4): Vector4 = Vector4(
    this.x + that.x,
    this.y + that.y,
    this.z + that.z,
    this.t + that.t)

  def normalize(): Vector4.Normalized = {
    val mag = magnitude()
    new Normalized(x / mag, y / mag, z / mag, t / mag)
  }

  def spatiallyNormalize(): Vector4.SpatiallyNormalized = {
    assert(t == -1.0 || t == 1.0)
    new SpatiallyNormalized(truncateTo3().normalize(), t)
  }
}
pauldoo/scratch
timetrace/core/src/main/scala/timetrace/math/Vector4.scala
Scala
isc
2,259
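A few worked examples (REPL-style) for the component-wise helpers in the Vector4 file above; they assume that class is on the classpath and the values are made up:

val lo = Vector4(0.0, 0.0, 0.0, 0.0)
val hi = Vector4(1.0, 1.0, 1.0, 1.0)
val v  = Vector4(1.5, -0.25, 0.5, 2.0)

Vector4.componentMinimums(v, hi)  // [1.0, -0.25, 0.5, 1.0]
Vector4.componentMaximums(v, lo)  // [1.5, 0.0, 0.5, 2.0]
Vector4.clamp(lo, hi)(v)          // [1.0, 0.0, 0.5, 1.0] -- each component pinned to [mins, maxs]

// spatiallyNormalize keeps t (which must be +/- 1.0) and normalizes only the (x, y, z) part:
Vector4(3.0, 0.0, 4.0, 1.0).spatiallyNormalize()  // [0.6, 0.0, 0.8, 1.0]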
/*
    Copyright 2012 Georgia Tech Research Institute

    Author: lance.gatlin@gtri.gatech.edu

    This file is part of org.gtri.util.xmlbuilder library.

    org.gtri.util.xmlbuilder library is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    org.gtri.util.xmlbuilder library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with org.gtri.util.xmlbuilder library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.gtri.util.xmlbuilder.impl.events

import org.gtri.util.scala.exelog.noop._
import org.gtri.util.xsddatatypes.XsdQName
import org.gtri.util.issue.api.DiagnosticLocator
import org.gtri.util.xmlbuilder.api.{XmlContract, XmlEvent}

object EndXmlElementEvent {
  implicit val thisclass = classOf[EndXmlElementEvent]
  implicit val log = Logger.getLog(thisclass)
}

case class EndXmlElementEvent(qName : XsdQName, locator : DiagnosticLocator) extends XmlEvent {
  import EndXmlElementEvent._

  def pushTo(contract: XmlContract) {
    log.block("pushTo",Seq("contract" -> contract)) {
      +"Pushing EndXmlElementEvent to XmlContract"
      ~"contract.endXmlElement()"
      contract.endXmlElement()
    }
  }
}
gtri-iead/org.gtri.util.xmlbuilder
impl/src/main/scala/org/gtri/util/xmlbuilder/impl/events/EndXmlElementEvent.scala
Scala
gpl-3.0
1,590
package ops.android.app

import android.app.Fragment
import android.content.Context

trait FragmentOps extends ContextOps {
  self: Fragment =>

  override implicit lazy val context: Context = getActivity

  override def _getString(resId: Int, args: AnyRef*): String = getString(resId, args: _*)
}
raizu/AndroidOps
src/main/scala/ops/android/app/FragmentOps.scala
Scala
apache-2.0
312
package models.view

import models.view.cache.EncryptedCacheHandling

/**
 * Creates an object which has access to methods for saving to and getting
 * from the cache. The CachedChangeOfCircs trait implements the cacheKey method required
 * for getting from the cache.
 * See Language (controller) and LanguageSpec (tests) for example
 * usage.
 */
class CacheHandlingWithCircs extends EncryptedCacheHandling with CachedChangeOfCircs
Department-for-Work-and-Pensions/ClaimCapture
c3/app/models/view/CacheHandlingWithCircs.scala
Scala
mit
434
/***********************************************************************
 * Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.process

import java.util.ServiceLoader

import org.geotools.process.factory.AnnotatedBeanProcessFactory
import org.geotools.text.Text
import org.locationtech.geomesa.process.GeoMesaProcessFactory.{Name, NameSpace, processes}

class GeoMesaProcessFactory extends AnnotatedBeanProcessFactory(Name, NameSpace, processes: _*)

object GeoMesaProcessFactory {
  val NameSpace = "geomesa"
  val Name = Text.text("GeoMesa Process Factory")

  def processes: Array[Class[_]] = {
    import scala.collection.JavaConversions._
    ServiceLoader.load(classOf[GeoMesaProcess]).iterator().map(_.getClass).toArray
  }
}
ronq/geomesa
geomesa-process/geomesa-process-wps/src/main/scala/org/locationtech/geomesa/process/GeoMesaProcessFactory.scala
Scala
apache-2.0
1,106
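GeoMesaProcessFactory above discovers processes through the standard java.util.ServiceLoader mechanism. The following is a generic, self-contained sketch of that mechanism; the Plugin trait, package name, and services file path are illustrative, not GeoMesa API.

package com.example.spi

import java.util.ServiceLoader
import scala.collection.JavaConverters._

// A service interface; concrete implementations need a public no-arg constructor.
trait Plugin {
  def name: String
}

// Registration happens via a resource file on the classpath:
//   META-INF/services/com.example.spi.Plugin
// containing one fully qualified implementation class name per line.
object PluginLoader {
  // Mirrors GeoMesaProcessFactory.processes: load every registered implementation
  // and hand back its runtime class.
  def discoveredClasses: List[Class[_]] =
    ServiceLoader.load(classOf[Plugin]).iterator().asScala.map(_.getClass).toList
}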
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core

import akka.actor._
import akka.event.slf4j.SLF4JLogging
import com.google.common.io.ByteStreams
import java.io.{ File, IOException }
import java.util.jar.JarFile
import org.ensime.api._

import scala.collection.mutable

class DocResolver(
  prefix: String,
  forceJavaVersion: Option[String] // for testing
)(
  implicit config: EnsimeConfig
) extends Actor with ActorLogging with DocUsecaseHandling {

  var allDocJars: List[File] = _
  var htmlToJar = Map.empty[String, File]
  var jarNameToJar = Map.empty[String, File]
  var docTypes = Map.empty[String, DocType]

  sealed trait DocType
  case object Javadoc extends DocType
  case object Javadoc8 extends DocType
  case object Scaladoc extends DocType

  // In javadoc docs, index.html has a comment that reads 'Generated by javadoc'
  private val JavadocComment = """Generated by javadoc (?:\(([0-9\.]+))?""".r.unanchored

  override def preStart(): Unit = {
    // On initialisation, do a fast scan (< 1s for 50 jars) to determine
    // the package contents of each jar, and whether it's a javadoc or
    // scaladoc.
    allDocJars = config.modules.values.flatMap(_.docJars).toList
    for (
      jarFile <- allDocJars if jarFile.exists()
    ) {
      try {
        val jar = new JarFile(jarFile)
        val jarFileName = jarFile.getName
        jarNameToJar += jarFileName -> jarFile
        docTypes += (jarFileName -> Scaladoc)
        val enumEntries = jar.entries()
        while (enumEntries.hasMoreElements) {
          val entry = enumEntries.nextElement()
          if (!entry.isDirectory) {
            val f = new File(entry.getName)
            val dir = f.getParent
            if (dir != null) {
              htmlToJar += entry.getName -> jarFile
            }
            // Check for javadocs
            if (entry.getName == "index.html") {
              val bytes = ByteStreams.toByteArray(jar.getInputStream(entry))
              new String(bytes) match {
                case JavadocComment(version: String) if version.startsWith("1.8") =>
                  docTypes += jarFileName -> Javadoc8
                case JavadocComment(_*) =>
                  docTypes += jarFileName -> Javadoc
                case _ =>
              }
            }
          }
        }
      } catch {
        case e: IOException =>
          // continue regardless
          log.error("Failed to process doc jar: " + jarFile.getName, e)
      }
    }
  }

  private def javaFqnToPath(fqn: DocFqn): String = {
    if (fqn.typeName == "package") {
      fqn.pack.replace(".", "/") + "/package-summary.html"
    } else {
      fqn.pack.replace(".", "/") + "/" + fqn.typeName + ".html"
    }
  }

  def scalaFqnToPath(fqn: DocFqn): String = {
    if (fqn.typeName == "package") {
      fqn.pack.replace(".", "/") + "/package.html"
    } else fqn.pack.replace(".", "/") + "/" + fqn.typeName + ".html"
  }

  private def makeLocalUri(jar: File, sig: DocSigPair): String = {
    val jarName = jar.getName
    val docType = docTypes(jarName)
    val java = docType == Javadoc || docType == Javadoc8
    if (java) {
      val path = javaFqnToPath(sig.java.fqn)
      val anchor = sig.java.member.map {
        s => "#" + { if (docType == Javadoc8) toJava8Anchor(s) else s }
      }.getOrElse("")
      s"$prefix/$jarName/$path$anchor"
    } else {
      val scalaSig = maybeReplaceWithUsecase(jar, sig.scala)
      val anchor = scalaSig.fqn.mkString +
        scalaSig.member.map { "@" + _ }.getOrElse("")
      s"$prefix/$jarName/index.html#$anchor"
    }
  }

  private val PackRegexp = """^((?:[a-z0-9]+\.)+)""".r

  private def guessJar(sig: DocSigPair): Option[File] = {
    htmlToJar.get(scalaFqnToPath(sig.scala.fqn))
      .orElse(htmlToJar.get(javaFqnToPath(sig.java.fqn)))
  }

  private def resolveLocalUri(sig: DocSigPair): Option[String] = {
    guessJar(sig) match {
      case Some(jar) =>
        log.debug(s"Resolved to jar: $jar")
        Some(makeLocalUri(jar, sig))
      case _ =>
        log.debug(s"Failed to resolve doc jar for: $sig")
        None
    }
  }

  // Javadoc 8 changed the anchor format to remove illegal
  // url characters: parens, commas, brackets.
  // See https://bugs.eclipse.org/bugs/show_bug.cgi?id=432056
  // and https://bugs.openjdk.java.net/browse/JDK-8025633
  private val Java8Chars = """(?:, |\(|\)|\[\])""".r
  private def toJava8Anchor(anchor: String): String = {
    Java8Chars.replaceAllIn(anchor, { m =>
      anchor(m.start) match {
        case ',' => "-"
        case '(' => "-"
        case ')' => "-"
        case '[' => ":A"
      }
    })
  }

  private def toAndroidAnchor(anchor: String): String = anchor.replace(",", ", ")

  private def resolveWellKnownUri(sig: DocSigPair): Option[String] = {
    if (sig.java.fqn.javaStdLib) {
      val path = javaFqnToPath(sig.java.fqn)
      val rawVersion = forceJavaVersion.getOrElse(scala.util.Properties.javaVersion)
      val version =
        if (rawVersion.startsWith("1.8")) "8"
        else if (rawVersion.startsWith("1.7")) "7"
        else "6"
      val anchor = sig.java.member.map {
        m => "#" + { if (version == "8") toJava8Anchor(m) else m }
      }.getOrElse("")
      Some(s"http://docs.oracle.com/javase/$version/docs/api/$path$anchor")
    } else if (sig.java.fqn.androidStdLib) {
      val path = javaFqnToPath(sig.java.fqn)
      val anchor = sig.java.member.map { m => "#" + toAndroidAnchor(m) }.getOrElse("")
      Some(s"http://developer.android.com/reference/$path$anchor")
    } else None
  }

  def resolve(sig: DocSigPair): Option[String] = resolveLocalUri(sig) orElse resolveWellKnownUri(sig)

  // for java stuff, really
  def resolve(sig: DocSig): Option[String] = resolve(DocSigPair(sig, sig))

  def receive: Receive = {
    case p: DocSigPair =>
      val response = resolve(p) match {
        case Some(path) => StringResponse(path)
        case None => FalseResponse
      }
      sender() ! response
  }

}

object DocResolver {
  def apply(
    prefix: String = "docs",
    java: Option[String] = None
  )(
    implicit config: EnsimeConfig
  ): Props = Props(classOf[DocResolver], prefix, java, config)
}
j-mckitrick/ensime-sbt
src/sbt-test/ensime-sbt/ensime-server/core/src/main/scala/org/ensime/core/DocResolver.scala
Scala
apache-2.0
6,287
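A standalone illustration of the Javadoc 8 anchor rewrite documented in the DocResolver comments above: '(' , ')' and ", " become '-', and "[]" becomes ":A". The regex mirrors the one in the file; the example signatures are made up.

import scala.util.matching.Regex

object Java8AnchorSketch {
  private val Java8Chars: Regex = """(?:, |\(|\)|\[\])""".r

  def toJava8Anchor(anchor: String): String =
    Java8Chars.replaceAllIn(anchor, m => m.matched match {
      case ", " | "(" | ")" => "-"
      case "[]"             => ":A"
      case other            => other // unreachable: the regex only matches the cases above
    })

  def main(args: Array[String]): Unit = {
    println(toJava8Anchor("wait(long, int)"))     // wait-long-int-
    println(toJava8Anchor("copyOf(int[], int)"))  // copyOf-int:A-int-
  }
}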
/*
 * Copyright 2009-2017. DigitalGlobe, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and limitations under the License.
 */

package org.mrgeo.mapalgebra.binarymath

import org.mrgeo.mapalgebra.parser.{ParserException, ParserNode}
import org.mrgeo.mapalgebra.raster.RasterMapOp
import org.mrgeo.mapalgebra.{MapOp, MapOpRegistrar}

object DivMapOp extends MapOpRegistrar {
  override def register:Array[String] = {
    Array[String]("div", "/")
  }

  def create(raster:RasterMapOp, const:Double):MapOp = {
    new DivMapOp(Some(raster), Some(const))
  }

  // rcreate's parameters (name, type, & order) must be the same as creates
  def rcreate(raster:RasterMapOp, const:Double):MapOp = {
    new DivMapOp(Some(raster), Some(const), true)
  }

  def create(rasterA:RasterMapOp, rasterB:RasterMapOp):MapOp = {
    new DivMapOp(Some(rasterA), Some(rasterB))
  }

  override def apply(node:ParserNode, variables:String => Option[ParserNode]):MapOp =
    new DivMapOp(node, variables)
}

class DivMapOp extends RawBinaryMathMapOp {

  private[binarymath] def this(raster:Option[RasterMapOp], paramB:Option[Any], reverse:Boolean = false) = {
    this()

    if (reverse) {
      varB = raster

      paramB match {
        case Some(rasterB:RasterMapOp) => varA = Some(rasterB)
        case Some(double:Double) => constA = Some(double)
        case Some(int:Int) => constA = Some(int.toDouble)
        case Some(long:Long) => constA = Some(long.toDouble)
        case Some(float:Float) => constA = Some(float.toDouble)
        case Some(short:Short) => constA = Some(short.toDouble)
        case _ => throw new ParserException(paramB + "\" is not a raster or constant")
      }
    }
    else {
      varA = raster

      paramB match {
        case Some(rasterB:RasterMapOp) => varB = Some(rasterB)
        case Some(double:Double) => constB = Some(double)
        case Some(int:Int) => constB = Some(int.toDouble)
        case Some(long:Long) => constB = Some(long.toDouble)
        case Some(float:Float) => constB = Some(float.toDouble)
        case Some(short:Short) => constB = Some(short.toDouble)
        case _ => throw new ParserException(paramB + "\" is not a raster or constant")
      }
    }
  }

  private[binarymath] def this(node:ParserNode, variables:String => Option[ParserNode]) = {
    this()
    initialize(node, variables)
  }

  override private[binarymath] def function(a:Double, b:Double):Double = a / b
}
ngageoint/mrgeo
mrgeo-mapalgebra/mrgeo-mapalgebra-rastermath/src/main/scala/org/mrgeo/mapalgebra/binarymath/DivMapOp.scala
Scala
apache-2.0
2,908
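The create/rcreate pair in DivMapOp above exists so the map algebra parser can build both `raster / const` and `const / raster` from the same registered operator. A stripped-down sketch of that forward/reverse operand pattern follows (plain doubles and REPL-style calls, not mrgeo API):

// Stripped-down illustration of the forward/reverse pattern used by DivMapOp.
final case class Div(a: Double, b: Double) {
  def eval: Double = a / b // same role as DivMapOp.function
}

// create: first operand is the raster-like value, second the constant -> raster / const
def create(raster: Double, const: Double): Div = Div(raster, const)

// rcreate: same parameter list, but operands are swapped internally -> const / raster
def rcreate(raster: Double, const: Double): Div = Div(const, raster)

create(10.0, 2.0).eval   // 5.0
rcreate(10.0, 2.0).eval  // 0.2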
package io.scalac.wtf.domain

import io.scalac.wtf.domain.tables.Users
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import slick.driver.H2Driver.api._

import scala.concurrent.ExecutionContext.Implicits.global
import slick.driver.H2Driver._

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

import UserService.createUser
import cats.data.Validated.{Invalid, Valid}
import io.scalac.wtf.domain.User.{PasswordTooShort, WrongEmailPattern}
import cats.implicits._

import scala.util.Success

class UserServiceSpec extends FlatSpec with Matchers with BeforeAndAfter {

  val db = Database.forConfig("h2mem1")
  val usersTable = TableQuery[Users]

  val validEmail = "aaa@scalac.io"
  val validPassword = "Password123"

  val validUser = User(email = validEmail, password = validPassword)
  val noEmailUser = User(email = "", password = validPassword)
  val noPasswordUser = User(email = validEmail, password = "")
  val noCredentailsUser = User(email = "", password = "")

  before {
    val createSchemaWork = for {
      _ <- usersTable.schema.create
    } yield ()

    Await.result(db.run(createSchemaWork), Duration.Inf)
  }

  "An UserService" should "create an user when it's valid and e-mail not taken" in {
    val createUserWork = for {
      result <- createUser(validUser)
    } yield result

    val resultFuture = db.run(createUserWork)

    resultFuture.onComplete { result =>
      assert(result.isSuccess)

      result match {
        case Success(r) => assert(r.isValid)
        case _ => assert(false)
      }
    }
  }

  "An UserService" should "not create an user when it's e-mail is not valid" in {
    val createUserWork = for {
      result <- createUser(noEmailUser)
    } yield result

    val resultFuture = db.run(createUserWork)

    resultFuture.onComplete { result =>
      assert(result.isSuccess)

      result match {
        case Success(r) => assert(r.isInvalid)
        case _ => assert(false)
      }
    }
  }

  "An UserService" should "not create an user when it's password is not valid" in {
    val createUserWork = for {
      result <- createUser(noPasswordUser)
    } yield result

    val resultFuture = db.run(createUserWork)

    resultFuture.onComplete { result =>
      assert(result.isSuccess)

      result match {
        case Success(r) => assert(r.isInvalid)
        case _ => assert(false)
      }
    }
  }

  "An UserService" should "not create an user when the credentials are empty" in {
    val createUserWork = for {
      result <- createUser(noCredentailsUser)
    } yield result

    val resultFuture = db.run(createUserWork)

    resultFuture.onComplete { result =>
      assert(result.isSuccess)

      result match {
        case Success(r) => assert(r.isInvalid)
        case _ => assert(false)
      }
    }
  }

  "An UserService" should "not create an user when it's the e-mail is taken" in {
    val createUserWork = for {
      _ <- createUser(validUser)
      result <- createUser(validUser)
    } yield result

    val resultFuture = db.run(createUserWork)

    resultFuture.onComplete { result =>
      assert(result.isSuccess)

      result match {
        case Success(r) => assert(r.isInvalid)
        case _ => assert(false)
      }
    }
  }

  "An UserService" should "not create an user when the credentials are empty and return 2 validation errors in the result" in {
    val createUserWork = for {
      result <- createUser(noCredentailsUser)
    } yield result

    val resultFuture = db.run(createUserWork)

    resultFuture.onComplete { result =>
      assert(result.isSuccess)

      result match {
        case Success(r) => {
          assert(r.isInvalid)

          r match {
            case Valid(_) => assert(false)
            case Invalid(e) => {
              val errorList = e.unwrap

              assert(errorList.contains(WrongEmailPattern))
              assert(errorList.contains(PasswordTooShort))
            }
          }
        }
        case _ => assert(false)
      }
    }
  }

  after {
    db.close()
  }
}
ScalaConsultants/whisky-tango-foxtrot
src/test/scala/io/scalac/wtf/domain/UserServiceSpec.scala
Scala
cc0-1.0
4,191
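The last test above expects two accumulated errors, which relies on cats' error-accumulating Validated. A minimal standalone sketch of that pattern follows (the checks and thresholds are made up, not the project's validation code):

import cats.data.ValidatedNel
import cats.implicits._

sealed trait UserError
case object WrongEmailPattern extends UserError
case object PasswordTooShort extends UserError

def checkEmail(email: String): ValidatedNel[UserError, String] =
  if (email.contains("@")) email.validNel else WrongEmailPattern.invalidNel

def checkPassword(password: String): ValidatedNel[UserError, String] =
  if (password.length >= 8) password.validNel else PasswordTooShort.invalidNel

// Both checks run and their failures accumulate instead of short-circuiting:
(checkEmail(""), checkPassword("")).mapN((email, password) => (email, password))
// Invalid(NonEmptyList(WrongEmailPattern, PasswordTooShort))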
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.types

import scala.math.Ordering
import scala.reflect.runtime.universe.typeTag

import org.apache.spark.annotation.Unstable
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.types.DayTimeIntervalType.fieldToString

/**
 * The type represents day-time intervals of the SQL standard. A day-time interval is made up
 * of a contiguous subset of the following fields:
 *   - SECOND, seconds within minutes and possibly fractions of a second [0..59.999999],
 *   - MINUTE, minutes within hours [0..59],
 *   - HOUR, hours within days [0..23],
 *   - DAY, days in the range [0..106751991].
 *
 * `DayTimeIntervalType` represents positive as well as negative day-time intervals.
 *
 * @param startField The leftmost field which the type comprises of. Valid values:
 *                   0 (DAY), 1 (HOUR), 2 (MINUTE), 3 (SECOND).
 * @param endField The rightmost field which the type comprises of. Valid values:
 *                 0 (DAY), 1 (HOUR), 2 (MINUTE), 3 (SECOND).
 *
 * @since 3.2.0
 */
@Unstable
case class DayTimeIntervalType(startField: Byte, endField: Byte) extends AtomicType {
  /**
   * Internally, values of day-time intervals are stored in `Long` values as amount of time in terms
   * of microseconds that are calculated by the formula:
   *   -/+ (24*60*60 * DAY + 60*60 * HOUR + 60 * MINUTE + SECOND) * 1000000
   */
  private[sql] type InternalType = Long

  @transient private[sql] lazy val tag = typeTag[InternalType]

  private[sql] val ordering = implicitly[Ordering[InternalType]]

  /**
   * The day-time interval type has constant precision. A value of the type always occupies 8 bytes.
   * The DAY field is constrained by the upper bound 106751991 to fit to `Long`.
   */
  override def defaultSize: Int = 8

  private[spark] override def asNullable: DayTimeIntervalType = this

  override val typeName: String = {
    val startFieldName = fieldToString(startField)
    val endFieldName = fieldToString(endField)
    if (startFieldName == endFieldName) {
      s"interval $startFieldName"
    } else if (startField < endField) {
      s"interval $startFieldName to $endFieldName"
    } else {
      throw QueryCompilationErrors.invalidDayTimeIntervalType(startFieldName, endFieldName)
    }
  }
}

/**
 * Extra factory methods and pattern matchers for DayTimeIntervalType.
 *
 * @since 3.2.0
 */
@Unstable
case object DayTimeIntervalType extends AbstractDataType {
  val DAY: Byte = 0
  val HOUR: Byte = 1
  val MINUTE: Byte = 2
  val SECOND: Byte = 3
  val dayTimeFields = Seq(DAY, HOUR, MINUTE, SECOND)

  def fieldToString(field: Byte): String = field match {
    case DAY => "day"
    case HOUR => "hour"
    case MINUTE => "minute"
    case SECOND => "second"
    case invalid => throw QueryCompilationErrors.invalidDayTimeField(invalid)
  }

  val stringToField: Map[String, Byte] = dayTimeFields.map(i => fieldToString(i) -> i).toMap

  val DEFAULT = DayTimeIntervalType(DAY, SECOND)

  def apply(): DayTimeIntervalType = DEFAULT

  def apply(field: Byte): DayTimeIntervalType = DayTimeIntervalType(field, field)

  override private[sql] def defaultConcreteType: DataType = DEFAULT

  override private[sql] def acceptsType(other: DataType): Boolean = {
    other.isInstanceOf[DayTimeIntervalType]
  }

  override private[sql] def simpleString: String = defaultConcreteType.simpleString
}
wangmiao1981/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/types/DayTimeIntervalType.scala
Scala
apache-2.0
4,186
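A worked example of the microsecond encoding documented in DayTimeIntervalType above, using plain Scala arithmetic rather than Spark APIs; the interval values are made up.

// -/+ (24*60*60 * DAY + 60*60 * HOUR + 60 * MINUTE + SECOND) * 1000000
val day = 1L
val hour = 2L
val minute = 3L
val second = 4L

val micros = (24L * 60 * 60 * day + 60L * 60 * hour + 60L * minute + second) * 1000000L
// micros == 93784000000L, i.e. INTERVAL '1 02:03:04' DAY TO SECOND

// The type itself: DayTimeIntervalType(DAY, SECOND).typeName is "interval day to second",
// which is also what the DEFAULT constant above resolves to.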