class JdbcWriter extends AnyRef
Value Members

final def !=(arg0: Any): Boolean
final def ##(): Int
final def ==(arg0: Any): Boolean
final def asInstanceOf[T0]: T0
def clone(): AnyRef
final def eq(arg0: AnyRef): Boolean
def equals(arg0: Any): Boolean
def finalize(): Unit
final def getClass(): Class[_]
def hashCode(): Int
final def isInstanceOf[T0]: Boolean
final def ne(arg0: AnyRef): Boolean
final def notify(): Unit
final def notifyAll(): Unit
def snappy(table: String, connectionProperties: Map[String, String], mode: SaveMode): Unit
def snappy(table: String, connectionProperties: Map[String, String]): Unit
def snappy(table: String): Unit
final def synchronized[T0](arg0: ⇒ T0): T0
def toString(): String
final def wait(): Unit
final def wait(arg0: Long, arg1: Int): Unit
final def wait(arg0: Long): Unit
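The three snappy overloads are the substantive members here; everything else is inherited from AnyRef. As a minimal sketch of how such an implicit class could be shaped: the constructor, the default SaveMode, and the delegation below are all assumptions for illustration, since this page does not show them and the real class goes through SnappyData's own JDBC provider.

import org.apache.spark.sql.{DataFrameWriter, Row, SaveMode}

object SnappyWriterSyntax {
  // Hypothetical: assume the class wraps the DataFrameWriter obtained from df.write.
  implicit class JdbcWriterSketch(writer: DataFrameWriter[Row]) {

    // Assumed defaults: no extra connection properties, Spark's usual
    // SaveMode.ErrorIfExists.
    def snappy(table: String): Unit =
      snappy(table, Map.empty[String, String], SaveMode.ErrorIfExists)

    def snappy(table: String, connectionProperties: Map[String, String]): Unit =
      snappy(table, connectionProperties, SaveMode.ErrorIfExists)

    // Illustrative delegation to the plain "jdbc" data source; the real class
    // additionally registers SnappyData dialects and sets the JDBC driver
    // argument itself.
    def snappy(table: String, connectionProperties: Map[String, String],
        mode: SaveMode): Unit = {
      val configured = connectionProperties.foldLeft(writer.format("jdbc").mode(mode)) {
        case (w, (key, value)) => w.option(key, value)
      }
      // In this sketch, "url" (and usually "driver") must be supplied via
      // connectionProperties; SnappyData would provide these on its own.
      configured.option("dbtable", table).save()
    }
  }
}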
Implicit class to easily invoke DataFrameWriter operations on SnappyData's JDBC provider.
Instead of spark.write.jdbc(url, table, properties), one can simply write spark.write.snappy(table). This also registers dialects for proper type conversions and passes the proper JDBC driver argument, avoiding ClassNotFoundException errors.
In the future this will also provide spark.write.snappyPut(table) to perform a PUT INTO.
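The contrast described above can be made concrete with a small usage sketch. The connection URL, table name, and credentials below are illustrative only, and the import that brings the implicit into scope is not shown on this page, so the last call is hedged accordingly.

import java.util.Properties
import org.apache.spark.sql.{SaveMode, SparkSession}

object JdbcWriterDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("jdbc-writer-demo")
      .getOrCreate()
    val df = spark.range(10).toDF("id")

    // Plain Spark JDBC write: the caller manages the URL, driver and
    // Properties object. The URL here is a placeholder.
    val props = new Properties()
    props.setProperty("user", "app")
    df.write.mode(SaveMode.Append)
      .jdbc("jdbc:snappydata://localhost:1527/", "MY_TABLE", props)

    // With a JdbcWriter implicit in scope (e.g. the SnappyWriterSyntax sketch
    // above; the real import is not shown on this page), the same write
    // collapses to a single call:
    df.write.snappy("MY_TABLE", Map("user" -> "app"), SaveMode.Append)

    spark.stop()
  }
}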