10 changes: 7 additions & 3 deletions build.sbt
@@ -4,6 +4,10 @@ val scalatest = "3.0.1"
val shapeless = "2.3.2"
val scalacheck = "1.13.4"

// Spark has scalatest and scalactic as runtime dependencies,
// which can mess things up if you use a different version in your project
val exclusions = Seq(ExclusionRule("org.scalatest"), ExclusionRule("org.scalactic"))
Contributor
@imarios imarios Feb 20, 2017

Does this fix the IntelliJ issue with ScalaTest?

Contributor Author
Yep!


lazy val root = Project("frameless", file("." + "frameless")).in(file("."))
.aggregate(core, cats, dataset, docs)
.settings(framelessSettings: _*)
@@ -22,7 +26,7 @@ lazy val cats = project
.settings(publishSettings: _*)
.settings(libraryDependencies ++= Seq(
"org.typelevel" %% "cats" % catsv,
"org.apache.spark" %% "spark-core" % sparkVersion % "provided"))
"org.apache.spark" %% "spark-core" % sparkVersion % "provided" excludeAll(exclusions: _*)))

lazy val dataset = project
.settings(name := "frameless-dataset")
@@ -31,8 +35,8 @@ lazy val dataset = project
.settings(framelessTypedDatasetREPL: _*)
.settings(publishSettings: _*)
.settings(libraryDependencies ++= Seq(
"org.apache.spark" %% "spark-core" % sparkVersion % "provided",
"org.apache.spark" %% "spark-sql" % sparkVersion % "provided"
"org.apache.spark" %% "spark-core" % sparkVersion % "provided" excludeAll(exclusions: _*),
"org.apache.spark" %% "spark-sql" % sparkVersion % "provided" excludeAll(exclusions: _*)
))
.dependsOn(core % "test->test;compile->compile")

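For projects that depend on frameless and add their own Spark dependency, the same trick applies downstream. A minimal sketch, assuming illustrative Spark and ScalaTest versions (not taken from this PR):

```scala
// Hypothetical downstream build.sbt: keep Spark's transitive scalatest/scalactic off the
// classpath so the project can pin its own test-framework version.
val sparkExclusions = Seq(ExclusionRule("org.scalatest"), ExclusionRule("org.scalactic"))

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql" % "2.0.2" % "provided" excludeAll(sparkExclusions: _*),
  "org.scalatest"    %% "scalatest" % "3.0.1" % Test
)
```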
3 changes: 2 additions & 1 deletion dataset/src/main/scala/frameless/TypedColumn.scala
@@ -271,6 +271,7 @@ object TypedColumn {
lgen: LabelledGeneric.Aux[T, H],
selector: Selector.Aux[H, K, V]
): Exists[T, K, V] = new Exists[T, K, V] {}

}

implicit class OrderedTypedColumnSyntax[T, U: CatalystOrdered](col: TypedColumn[T, U]) {
@@ -279,4 +280,4 @@
def >(other: TypedColumn[T, U]): TypedColumn[T, Boolean] = (col.untyped > other.untyped).typed
def >=(other: TypedColumn[T, U]): TypedColumn[T, Boolean] = (col.untyped >= other.untyped).typed
}
}
}
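As a usage note (not part of this diff): the `OrderedTypedColumnSyntax` operators above compare two columns of the same `CatalystOrdered` type into a `TypedColumn[T, Boolean]`. A rough sketch, assuming a made-up `Purchase` case class, the selector-based `apply` added later in this PR, and `TypedDataset#filter` accepting a boolean column:

```scala
import frameless.TypedDataset

// Hypothetical example type; not part of this PR.
case class Purchase(price: Double, budget: Double)

// Keep rows whose price exceeds their budget: `>` on two Double columns
// yields a TypedColumn[Purchase, Boolean] that filter can consume.
def overBudget(ds: TypedDataset[Purchase]): TypedDataset[Purchase] =
  ds.filter(ds(_.price) > ds(_.budget))
```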
9 changes: 5 additions & 4 deletions dataset/src/main/scala/frameless/TypedDataset.scala
@@ -62,11 +62,9 @@ class TypedDataset[T] protected[frameless](val dataset: Dataset[T])(implicit val
*
* It is statically checked that a column with that name exists and has type `A`.
*/
def apply[A](column: Witness.Lt[Symbol])(
implicit
exists: TypedColumn.Exists[T, column.T, A],
def apply[A](selector: T => A)(implicit
encoder: TypedEncoder[A]
): TypedColumn[T, A] = col(column)
): TypedColumn[T, A] = macro frameless.macros.ColumnMacros.fromFunction[T, A]
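To make the new signature concrete, a rough sketch of how the macro-backed `apply` might be used; the `Person` case class is made up for illustration:

```scala
import frameless.{TypedColumn, TypedDataset}

// Hypothetical example type; not part of this PR.
case class Person(name: String, age: Int)

// Before this change: ds('age), checked via Witness/Exists.
// After this change:  ds(_.age), checked by ColumnMacros.fromFunction at compile time.
def ageColumn(ds: TypedDataset[Person]): TypedColumn[Person, Int] =
  ds(_.age)
```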

/** Returns `TypedColumn` of type `A` given its name.
*
@@ -319,6 +317,9 @@ class TypedDataset[T] protected[frameless](val dataset: Dataset[T])(implicit val
}
}

def selectExpr[B](expr: T => B)(implicit encoder: TypedEncoder[B]): TypedDataset[B] =
macro frameless.macros.ColumnMacros.fromExpr[T, B]
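Similarly, a hedged sketch of what `selectExpr` appears intended to support, projecting an expression over `T` into a new `TypedDataset`; the exact set of expression shapes accepted depends on `ColumnMacros.fromExpr`, which is not shown in this diff:

```scala
import frameless.TypedDataset

// Hypothetical example type, as above.
case class Person(name: String, age: Int)

// Project each Person to a Boolean; needs an implicit TypedEncoder for the result type.
def isAdult(ds: TypedDataset[Person]): TypedDataset[Boolean] =
  ds.selectExpr(p => p.age >= 18)
```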

/** Type-safe projection from type T to Tuple2[A,B]
* {{{
* d.select( d('a), d('a)+d('b), ... )