diff --git a/gradle.properties b/gradle.properties
index 9e3077929..3d2fa5308 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -39,6 +39,8 @@
 scala211Version = 2.11.12
 scala211MajorVersion = 2.11
 scala212Version = 2.12.8
 scala212MajorVersion = 2.12
+scala213Version = 2.13.6
+scala213MajorVersion = 2.13
 
 stormVersion = 1.0.6
diff --git a/spark/core/build.gradle b/spark/core/build.gradle
index 1bc75cbe4..efee6dc04 100644
--- a/spark/core/build.gradle
+++ b/spark/core/build.gradle
@@ -10,6 +10,7 @@ apply plugin: 'spark.variants'
 sparkVariants {
     capabilityGroup 'org.elasticsearch.spark.variant'
     setCoreDefaultVariant "spark20scala212", spark24Version, scala212Version
+    addCoreFeatureVariant "spark30scala213", spark30Version, scala213Version
     addCoreFeatureVariant "spark30scala212", spark30Version, scala212Version
     addCoreFeatureVariant "spark20scala211", spark24Version, scala211Version
     addCoreFeatureVariant "spark20scala210", spark22Version, scala210Version
@@ -50,7 +51,7 @@ sparkVariants {
         add(variant.configuration('implementation'), project(":elasticsearch-hadoop-mr"))
         add(variant.configuration('implementation'), "commons-logging:commons-logging:1.1.1")
 
-        add(variant.configuration('compileOnly'), "com.fasterxml.jackson.module:jackson-module-scala_${variant.scalaMajorVersion}:2.6.7.1")
+        add(variant.configuration('compileOnly'), "com.fasterxml.jackson.module:jackson-module-scala_${variant.scalaMajorVersion}:2.9.10")
         add(variant.configuration('compileOnly'), "com.fasterxml.jackson.core:jackson-annotations:2.6.7")
         add(variant.configuration('compileOnly'), "com.google.guava:guava:14.0.1")
         add(variant.configuration('compileOnly'), "com.google.protobuf:protobuf-java:2.5.0")
@@ -100,7 +101,7 @@ sparkVariants {
         String generatedJavaDirectory = "$buildDir/generated/java/${variant.name}"
         Configuration scalaCompilerPlugin = project.configurations.maybeCreate(variant.configuration('scalaCompilerPlugin'))
         scalaCompilerPlugin.defaultDependencies { dependencies ->
-            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.13"))
+            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.18"))
         }
 
         ScalaCompile compileScala = tasks.getByName(scalaCompileTaskName) as ScalaCompile
diff --git a/spark/core/src/main/scala/org/elasticsearch/spark/rdd/AbstractEsRDD.scala b/spark/core/src/main/scala/org/elasticsearch/spark/rdd/AbstractEsRDD.scala
index 6d3a91805..559664144 100644
--- a/spark/core/src/main/scala/org/elasticsearch/spark/rdd/AbstractEsRDD.scala
+++ b/spark/core/src/main/scala/org/elasticsearch/spark/rdd/AbstractEsRDD.scala
@@ -18,8 +18,7 @@
  */
 package org.elasticsearch.spark.rdd;
 
-import scala.collection.JavaConversions.collectionAsScalaIterable
-import scala.collection.JavaConversions.mapAsJavaMap
+import JDKCollectionConvertersCompat.Converters._
 import scala.reflect.ClassTag
 import org.apache.commons.logging.LogFactory
 import org.apache.spark.Partition
@@ -45,7 +44,7 @@ private[spark] abstract class AbstractEsRDD[T: ClassTag](
   @transient protected lazy val logger = LogFactory.getLog(this.getClass())
 
   override def getPartitions: Array[Partition] = {
-    esPartitions.zipWithIndex.map { case(esPartition, idx) =>
+    esPartitions.asScala.zipWithIndex.map { case(esPartition, idx) =>
       new EsPartition(id, idx, esPartition)
     }.toArray
   }
@@ -70,7 +69,7 @@ private[spark] abstract class AbstractEsRDD[T: ClassTag](
 
   @transient private[spark] lazy val esCfg = {
     val cfg = new SparkSettingsManager().load(sc.getConf).copy();
-    cfg.merge(params)
+    cfg.merge(params.asJava)
     InitializationUtils.setUserProviderIfNotSet(cfg, classOf[HadoopUserProvider], logger)
     cfg
   }
diff --git a/spark/core/src/main/scala/org/elasticsearch/spark/rdd/JDKCollectionConvertersCompat.scala b/spark/core/src/main/scala/org/elasticsearch/spark/rdd/JDKCollectionConvertersCompat.scala
new file mode 100644
index 000000000..c32b379ea
--- /dev/null
+++ b/spark/core/src/main/scala/org/elasticsearch/spark/rdd/JDKCollectionConvertersCompat.scala
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.spark.rdd
+
+/**
+ * Magic to get cross-compiling access to scala.jdk.CollectionConverters
+ * with a fallback on scala.collection.JavaConverters, without deprecation
+ * warning in any Scala version.
+ * From https://github.com/scala/scala-collection-compat/issues/208#issuecomment-497735669
+ */
+private[rdd] object JDKCollectionConvertersCompat {
+  object Scope1 {
+    object jdk {
+      type CollectionConverters = Int
+    }
+  }
+  import Scope1._
+
+  object Scope2 {
+    import scala.collection.{JavaConverters => CollectionConverters}
+    object Inner {
+      import scala._
+      import jdk.CollectionConverters
+      val Converters = CollectionConverters
+    }
+  }
+
+  val Converters = Scope2.Inner.Converters
+}
\ No newline at end of file
diff --git a/spark/core/src/main/scala/org/elasticsearch/spark/serialization/ScalaMapFieldExtractor.scala b/spark/core/src/main/scala/org/elasticsearch/spark/serialization/ScalaMapFieldExtractor.scala
index 8f87a806b..2dc5047ed 100644
--- a/spark/core/src/main/scala/org/elasticsearch/spark/serialization/ScalaMapFieldExtractor.scala
+++ b/spark/core/src/main/scala/org/elasticsearch/spark/serialization/ScalaMapFieldExtractor.scala
@@ -18,10 +18,8 @@
  */
 package org.elasticsearch.spark.serialization
 
-import scala.collection.GenMapLike
 import scala.collection.Map
 
-import org.elasticsearch.hadoop.serialization.field.ConstantFieldExtractor
 import org.elasticsearch.hadoop.serialization.MapFieldExtractor
 import org.elasticsearch.hadoop.serialization.field.FieldExtractor._
 import org.elasticsearch.spark.serialization.{ ReflectionUtils => RU }
diff --git a/spark/sql-13/build.gradle b/spark/sql-13/build.gradle
index 878768605..0a97d75a6 100644
--- a/spark/sql-13/build.gradle
+++ b/spark/sql-13/build.gradle
@@ -125,7 +125,7 @@ sparkVariants {
         String generatedJavaDirectory = "$buildDir/generated/java/${variant.name}"
         Configuration scalaCompilerPlugin = project.configurations.maybeCreate(variant.configuration('scalaCompilerPlugin'))
         scalaCompilerPlugin.defaultDependencies { dependencies ->
-            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.13"))
+            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.18"))
         }
 
         ScalaCompile compileScala = tasks.getByName(scalaCompileTaskName) as ScalaCompile
diff --git a/spark/sql-20/build.gradle b/spark/sql-20/build.gradle
index 92aea086f..b2a7dac06 100644
--- a/spark/sql-20/build.gradle
+++ b/spark/sql-20/build.gradle
@@ -144,7 +144,7 @@ sparkVariants {
         String generatedJavaDirectory = "$buildDir/generated/java/${variant.name}"
         Configuration scalaCompilerPlugin = project.configurations.maybeCreate(variant.configuration('scalaCompilerPlugin'))
         scalaCompilerPlugin.defaultDependencies { dependencies ->
-            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.13"))
+            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.18"))
         }
 
         ScalaCompile compileScala = tasks.getByName(scalaCompileTaskName) as ScalaCompile
diff --git a/spark/sql-30/build.gradle b/spark/sql-30/build.gradle
index c786c1f4b..e8512aae7 100644
--- a/spark/sql-30/build.gradle
+++ b/spark/sql-30/build.gradle
@@ -10,6 +10,7 @@ apply plugin: 'spark.variants'
 sparkVariants {
     capabilityGroup 'org.elasticsearch.spark.sql.variant'
     setDefaultVariant "spark30scala212", spark30Version, scala212Version
+    addFeatureVariant "spark30scala213", spark30Version, scala213Version
 
     all { SparkVariantPlugin.SparkVariant variant ->
         String scalaCompileTaskName = project.sourceSets
@@ -130,7 +131,7 @@ sparkVariants {
         String generatedJavaDirectory = "$buildDir/generated/java/${variant.name}"
         Configuration scalaCompilerPlugin = project.configurations.maybeCreate(variant.configuration('scalaCompilerPlugin'))
         scalaCompilerPlugin.defaultDependencies { dependencies ->
-            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.13"))
+            dependencies.add(project.dependencies.create("com.typesafe.genjavadoc:genjavadoc-plugin_${variant.scalaVersion}:0.18"))
         }
 
         ScalaCompile compileScala = tasks.getByName(scalaCompileTaskName) as ScalaCompile
diff --git a/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/DefaultSource.scala b/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/DefaultSource.scala
index bf527a730..2a2d6c2d5 100644
--- a/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/DefaultSource.scala
+++ b/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/DefaultSource.scala
@@ -344,7 +344,7 @@ private[sql] case class ElasticsearchRelation(parameters: Map[String, String], @
       case EqualTo(attribute, value) => {
         // if we get a null, translate it into a missing query (we're extra careful - Spark should translate the equals into isMissing anyway)
-        if (value == null || value == None || value == Unit) {
+        if (value == null || value == None || value == ()) {
           if (isES50) {
             s"""{"bool":{"must_not":{"exists":{"field":"$attribute"}}}}"""
           }
diff --git a/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/SchemaUtils.scala b/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/SchemaUtils.scala
index 137b98a89..62fe2c346 100644
--- a/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/SchemaUtils.scala
+++ b/spark/sql-30/src/main/scala/org/elasticsearch/spark/sql/SchemaUtils.scala
@@ -259,7 +259,7 @@ private[sql] object SchemaUtils {
     for (prop <- rowOrderProps.asScala) {
       val value = StringUtils.tokenize(prop._2).asScala
       if (!value.isEmpty) {
-        order.put(prop._1, new ArrayBuffer() ++= value)
+        order.put(prop._1, value.toSeq)
       }
     }
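Illustrative note, not part of the change set: the JDKCollectionConvertersCompat object added above is consumed through a single wildcard import, which resolves to scala.jdk.CollectionConverters on Scala 2.13 and falls back to scala.collection.JavaConverters on 2.11/2.12, exactly as AbstractEsRDD now does with esPartitions.asScala and params.asJava. A minimal sketch of that usage follows; the ConvertersCompatUsage object and its two methods are hypothetical names, and the sketch assumes it sits in the org.elasticsearch.spark.rdd package because the shim is private[rdd].

// Hypothetical usage sketch of the compat shim above; names are illustrative only.
package org.elasticsearch.spark.rdd

import JDKCollectionConvertersCompat.Converters._

object ConvertersCompatUsage {
  // The wildcard import brings the asScala/asJava decorators into scope from
  // scala.jdk.CollectionConverters on Scala 2.13, or from scala.collection.JavaConverters
  // on 2.11/2.12, so the same source compiles on all supported Scala versions
  // without deprecation warnings.
  def partitionsAsScala(parts: java.util.List[String]): Seq[String] =
    parts.asScala.toSeq

  def settingsAsJava(params: scala.collection.Map[String, String]): java.util.Map[String, String] =
    params.asJava
}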