From e6ee018d9bf490a990f0fe0e0c1d07fb29be0540 Mon Sep 17 00:00:00 2001
From: DuyHai DOAN
Date: Mon, 26 Jan 2015 23:49:36 +0100
Subject: [PATCH] Fix some weird compilation issue with some columns

---
 src/main/scala/us/unemployment/demo/FromCSVToCassandra.scala | 5 +++--
 .../scala/weather/data/demo/WeatherDataIntoCassandra.scala   | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/main/scala/us/unemployment/demo/FromCSVToCassandra.scala b/src/main/scala/us/unemployment/demo/FromCSVToCassandra.scala
index a56c021..eb6dd3e 100644
--- a/src/main/scala/us/unemployment/demo/FromCSVToCassandra.scala
+++ b/src/main/scala/us/unemployment/demo/FromCSVToCassandra.scala
@@ -2,6 +2,7 @@ package us.unemployment.demo
 
 import com.datastax.spark.connector.{SomeColumns, _}
+import com.datastax.spark.connector.SomeColumns
 import org.apache.spark.{SparkConf, SparkContext}
 
 object FromCSVToCassandra {
 
@@ -37,7 +38,7 @@ object FromCSVToCassandra {
     Footnotes
    */
   val CSV: String = "src/main/data/us_unemployment.csv"
-  val TABLE_COLUMNS = Seq("year", "civil_non_institutional_count", "civil_labor_count", "labor_population_percentage",
+  val TABLE_COLUMNS = SomeColumns("year", "civil_non_institutional_count", "civil_labor_count", "labor_population_percentage",
     "employed_count", "employed_percentage", "agriculture_part_count", "non_agriculture_part_count",
     "unemployed_count", "unemployed_percentage_to_labor", "not_labor_count", "footnotes")
 
@@ -63,7 +64,7 @@ object FromCSVToCassandra {
         lines(4).toInt, lines(5).toDouble, lines(6).toInt, lines(7).toInt,
         lines(8).toInt, lines(9).toDouble, lines(10).toInt)
       }}
-      .saveToCassandra(UsUnemploymentSchema.KEYSPACE, UsUnemploymentSchema.TABLE, SomeColumns(TABLE_COLUMNS:_*))
+      .saveToCassandra(UsUnemploymentSchema.KEYSPACE, UsUnemploymentSchema.TABLE, TABLE_COLUMNS)
 
     sc.stop()
   }
diff --git a/src/main/scala/weather/data/demo/WeatherDataIntoCassandra.scala b/src/main/scala/weather/data/demo/WeatherDataIntoCassandra.scala
index 23aa5d2..9a4d651 100644
--- a/src/main/scala/weather/data/demo/WeatherDataIntoCassandra.scala
+++ b/src/main/scala/weather/data/demo/WeatherDataIntoCassandra.scala
@@ -8,7 +8,7 @@ import org.joda.time.Period
 
 object WeatherDataIntoCassandra {
 
   val WEATHER_2014_CSV: String = "/Users/archinnovinfo/perso/spark_data/Weather Data 2014.csv"
-  val TABLE_COLUMNS = Seq("weather_station", "year", "month", "day", "hour",
+  val TABLE_COLUMNS = SomeColumns("weather_station", "year", "month", "day", "hour",
     "temperature", "dewpoint", "pressure", "wind_direction", "wind_speed",
     "sky_condition", "sky_condition_text", "one_hour_precip", "six_hour_precip")
 
@@ -43,12 +43,13 @@ object WeatherDataIntoCassandra {
           lines(5).toFloat, lines(6).toFloat, lines(7).toFloat, lines(8).toFloat.toInt,
           lines(9).toFloat, skyConditionCode, skyConditionBc.value.get(skyConditionCode),
           lines(11).toFloat, lines(12).toFloat)
       }}
-      .saveToCassandra(WeatherDataSchema.KEYSPACE, WeatherDataSchema.WEATHER_DATA_TABLE, SomeColumns(TABLE_COLUMNS:_*))
+      .saveToCassandra(WeatherDataSchema.KEYSPACE, WeatherDataSchema.WEATHER_DATA_TABLE, TABLE_COLUMNS)
 
     val endTime = DateTime.now
     val period: Period = new Period(endTime, startTime)
+    println(s"Job duration (sec) : ${period.getSeconds}")
 
   }
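
Note on the pattern this patch adopts: both files stop declaring the column list as a Seq[String] and expanding it with SomeColumns(TABLE_COLUMNS:_*), and instead build the SomeColumns value up front and pass it straight to saveToCassandra. A plausible cause of the "weird compilation issue" is that SomeColumns in newer connector versions takes column-reference varargs rather than String*, so splatting a Seq[String] no longer type-checks while plain string literals still convert implicitly. The sketch below shows the resulting pattern in isolation; the keyspace demo_ks, table us_unemployment_mini, the sample tuples, and the local connection settings are illustrative assumptions, not part of the patched project.

    import com.datastax.spark.connector.SomeColumns
    import com.datastax.spark.connector._
    import org.apache.spark.{SparkConf, SparkContext}

    object SomeColumnsUsageSketch {

      // Column selection declared once as a SomeColumns value (string literals
      // convert to column references), then passed as-is to saveToCassandra.
      val TABLE_COLUMNS = SomeColumns("year", "employed_count", "unemployed_count")

      def main(args: Array[String]): Unit = {
        val conf = new SparkConf(true)
          .setAppName("SomeColumnsUsageSketch")
          .setMaster("local[2]")                               // assumed local run
          .set("spark.cassandra.connection.host", "127.0.0.1") // assumed local Cassandra node

        val sc = new SparkContext(conf)

        // Hypothetical keyspace/table; each tuple maps positionally onto TABLE_COLUMNS.
        sc.parallelize(Seq((2013, 143929, 11460), (2014, 146305, 9617)))
          .saveToCassandra("demo_ks", "us_unemployment_mini", TABLE_COLUMNS)

        sc.stop()
      }
    }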