
case class SnowflakeTableDataObject(id: DataObjectId, schemaMin: Option[StructType] = None, table: Table, saveMode: SDLSaveMode = SDLSaveMode.Overwrite, connectionId: ConnectionId, comment: Option[String], metadata: Option[DataObjectMetadata] = None)(implicit instanceRegistry: InstanceRegistry) extends TransactionalSparkTableDataObject with Product with Serializable

A DataObject of type SnowflakeTableDataObject. Provides the details needed to access Snowflake tables from an action.

id

unique name of this data object

schemaMin

An optional, minimal schema that this DataObject must have to pass schema validation on reading and writing.

table

Snowflake table to be written by this output

saveMode

SDLSaveMode to use when writing to the table; default is Overwrite

connectionId

The SnowflakeTableConnection to use for the table

comment

An optional comment to add to the table after writing a DataFrame to it

metadata

Additional metadata describing this data object

Linear Supertypes
Serializable, Serializable, Product, Equals, TransactionalSparkTableDataObject, CanWriteDataFrame, TableDataObject, SchemaValidation, CanCreateDataFrame, DataObject, AtlasExportable, SmartDataLakeLogger, ParsableFromConfig[DataObject], SdlConfigObject, AnyRef, Any

Instance Constructors

  1. new SnowflakeTableDataObject(id: DataObjectId, schemaMin: Option[StructType] = None, table: Table, saveMode: SDLSaveMode = SDLSaveMode.Overwrite, connectionId: ConnectionId, comment: Option[String], metadata: Option[DataObjectMetadata] = None)(implicit instanceRegistry: InstanceRegistry)


Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##(): Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. def addFieldIfNotExisting(writeSchema: StructType, colName: String, dataType: DataType): StructType
    Attributes
    protected
    Definition Classes
    CanCreateDataFrame
  5. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  6. def atlasName: String
    Definition Classes
    TableDataObject → DataObject → AtlasExportable
  7. def atlasQualifiedName(prefix: String): String
    Definition Classes
    TableDataObject → AtlasExportable
  8. def clone(): AnyRef
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  9. val comment: Option[String]
  10. val connectionId: ConnectionId
  11. def createReadSchema(writeSchema: StructType)(implicit session: SparkSession): StructType
    Definition Classes
    CanCreateDataFrame
  12. def dropTable(implicit session: SparkSession): Unit
    Definition Classes
    SnowflakeTableDataObject → TableDataObject
  13. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  14. def factory: FromConfigFactory[DataObject]
    Definition Classes
    SnowflakeTableDataObject → ParsableFromConfig
  15. def finalize(): Unit
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  16. final def getClass(): Class[_]
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  17. def getConnection[T <: Connection](connectionId: ConnectionId)(implicit registry: InstanceRegistry, ct: ClassTag[T], tt: scala.reflect.api.JavaUniverse.TypeTag[T]): T
    Attributes
    protected
    Definition Classes
    DataObject
  18. def getConnectionReg[T <: Connection](connectionId: ConnectionId, registry: InstanceRegistry)(implicit ct: ClassTag[T], tt: scala.reflect.api.JavaUniverse.TypeTag[T]): T
    Attributes
    protected
    Definition Classes
    DataObject
  19. def getDataFrame(partitionValues: Seq[PartitionValues] = Seq())(implicit session: SparkSession, context: ActionPipelineContext): DataFrame
    Definition Classes
    SnowflakeTableDataObject → CanCreateDataFrame
  20. def getPKduplicates(implicit session: SparkSession, context: ActionPipelineContext): DataFrame
    Definition Classes
    TableDataObject
  21. def getPKnulls(implicit session: SparkSession, context: ActionPipelineContext): DataFrame
    Definition Classes
    TableDataObject
  22. def getPKviolators(implicit session: SparkSession, context: ActionPipelineContext): DataFrame
    Definition Classes
    TableDataObject
  23. def housekeepingMode: Option[HousekeepingMode]
    Definition Classes
    DataObject
  24. val id: DataObjectId
    Definition Classes
    SnowflakeTableDataObject → DataObject → SdlConfigObject
  25. def init(df: DataFrame, partitionValues: Seq[PartitionValues], saveModeOptions: Option[SaveModeOptions])(implicit session: SparkSession, context: ActionPipelineContext): Unit
    Definition Classes
    CanWriteDataFrame
  26. implicit val instanceRegistry: InstanceRegistry
  27. def isDbExisting(implicit session: SparkSession): Boolean
    Definition Classes
    SnowflakeTableDataObject → TableDataObject
  28. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  29. def isPKcandidateKey(implicit session: SparkSession, context: ActionPipelineContext): Boolean
    Definition Classes
    TableDataObject
  30. def isTableExisting(implicit session: SparkSession): Boolean
    Definition Classes
    SnowflakeTableDataObject → TableDataObject
  31. lazy val logger: Logger
    Attributes
    protected
    Definition Classes
    SmartDataLakeLogger
    Annotations
    @transient()
  32. val metadata: Option[DataObjectMetadata]
    Definition Classes
    SnowflakeTableDataObject → DataObject
  33. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  34. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  35. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  36. val saveMode: SDLSaveMode
  37. val schemaMin: Option[StructType]
    Definition Classes
    SnowflakeTableDataObject → SchemaValidation
  38. def streamingOptions: Map[String, String]
    Definition Classes
    CanWriteDataFrame
  39. final def synchronized[T0](arg0: ⇒ T0): T0
    Definition Classes
    AnyRef
  40. var table: Table
    Definition Classes
    SnowflakeTableDataObject → TableDataObject
  41. val tableSchema: StructType
    Definition Classes
    TableDataObject
  42. def toStringShort: String
    Definition Classes
    DataObject
  43. def validateSchema(df: DataFrame, schemaExpected: StructType, role: String): Unit
    Definition Classes
    SchemaValidation
  44. def validateSchemaMin(df: DataFrame, role: String): Unit
    Definition Classes
    SchemaValidation
  45. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  46. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  47. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  48. def writeDataFrame(df: DataFrame, createTableOnly: Boolean, partitionValues: Seq[PartitionValues], saveModeOptions: Option[SaveModeOptions])(implicit session: SparkSession): Unit

Writes a DataFrame to Snowflake. Snowflake does not support explicit partitions, so any passed partition values are ignored (see the usage sketch after this list).

  49. def writeDataFrame(df: DataFrame, partitionValues: Seq[PartitionValues] = Seq(), isRecursiveInput: Boolean = false, saveModeOptions: Option[SaveModeOptions] = None)(implicit session: SparkSession, context: ActionPipelineContext): Unit
    Definition Classes
    SnowflakeTableDataObject → CanWriteDataFrame
  50. def writeStreamingDataFrame(df: DataFrame, trigger: Trigger, options: Map[String, String], checkpointLocation: String, queryName: String, outputMode: OutputMode, saveModeOptions: Option[SaveModeOptions])(implicit session: SparkSession, context: ActionPipelineContext): StreamingQuery
    Definition Classes
    CanWriteDataFrame
