public class InsertIntoHiveTable extends SparkPlan implements scala.Product, scala.Serializable
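In normal use this physical plan node is produced by the Hive planner rather than constructed by hand. A minimal sketch of how it comes into play, assuming a Spark 1.0-era deployment with Hive support (the table names are hypothetical):

```scala
import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.HiveContext

object InsertExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local", "InsertExample")
    val hiveContext = new HiveContext(sc)

    // A HiveQL INSERT OVERWRITE is planned as an InsertIntoHiveTable node
    // with overwrite = true; a plain INSERT INTO yields overwrite = false.
    hiveContext.hql("INSERT OVERWRITE TABLE target_table SELECT * FROM source_table")
  }
}
```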
| Constructor and Description |
|---|
| `InsertIntoHiveTable(org.apache.spark.sql.hive.MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, SparkPlan child, boolean overwrite, HiveContext sc)` |
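The `partition` argument deserves a note: reading from the signature alone, each key is a partition column name and the `Option[String]` value distinguishes static from dynamic partitions. A hedged sketch (column names and values are hypothetical):

```scala
// Hypothetical partition spec for a statement like
//   INSERT OVERWRITE TABLE t PARTITION (ds='2024-01-01', hr) SELECT ...
// Some(...) pins a static partition value; None marks a dynamic partition
// column whose value comes from the query output (my reading of the Option).
val partition: Map[String, Option[String]] = Map(
  "ds" -> Some("2024-01-01"), // static partition
  "hr" -> None                // dynamic partition
)
```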
| Modifier and Type | Method and Description |
|---|---|
| `SparkPlan` | `child()` |
| `RDD<org.apache.spark.sql.catalyst.expressions.Row>` | `execute()` Runs this query returning the result as an RDD (see the sketch below the table). |
| `scala.collection.immutable.List<HiveContext>` | `otherCopyArgs()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>` | `output()` |
| `Object` | `outputClass()` |
| `boolean` | `overwrite()` |
| `scala.collection.immutable.Map<String,scala.Option<String>>` | `partition()` |
| `void` | `saveAsHiveFile(RDD<org.apache.hadoop.io.Writable> rdd, Class<?> valueClass, org.apache.hadoop.hive.ql.plan.FileSinkDesc fileSinkConf, org.apache.hadoop.mapred.JobConf conf, boolean isCompressed)` |
| `org.apache.spark.sql.hive.MetastoreRelation` | `table()` |
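`execute()` is normally invoked by the framework, not by user code. A hedged sketch of reaching it through Spark 1.0's developer-facing `queryExecution` hook, assuming that member is accessible as in that release (table names hypothetical):

```scala
import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.HiveContext

object ExecuteExample {
  def main(args: Array[String]): Unit = {
    val hiveContext = new HiveContext(new SparkContext("local", "ExecuteExample"))

    // hql(...) returns a lazy SchemaRDD; for an INSERT statement the
    // executed physical plan is rooted at an InsertIntoHiveTable node.
    val inserted = hiveContext.hql(
      "INSERT OVERWRITE TABLE target_table SELECT * FROM source_table")

    // execute() runs the plan and returns an RDD[Row]; the insert itself
    // happens as a side effect of writing to the Hive table.
    val rows = inserted.queryExecution.executedPlan.execute()
    println(rows.count())
  }
}
```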
Methods inherited from class org.apache.spark.sql.execution.SparkPlan:
`executeCollect, outputPartitioning, requiredChildDistribution`

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan:
`expressions, generateSchemaString, generateSchemaString, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, schemaString, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp`

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode:
`apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, generateTreeString, getNodeNumbered, id, makeCopy, map, mapChildren, nextId, nodeName, numberedTreeString, sameInstance, simpleString, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren`
public InsertIntoHiveTable(org.apache.spark.sql.hive.MetastoreRelation table, scala.collection.immutable.Map<String,scala.Option<String>> partition, SparkPlan child, boolean overwrite, HiveContext sc)
public org.apache.spark.sql.hive.MetastoreRelation table()
public scala.collection.immutable.Map<String,scala.Option<String>> partition()
public SparkPlan child()
public boolean overwrite()
public Object outputClass()
public scala.collection.immutable.List<HiveContext> otherCopyArgs()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Specified by:
`output` in class `org.apache.spark.sql.catalyst.plans.QueryPlan<SparkPlan>`
public void saveAsHiveFile(RDD<org.apache.hadoop.io.Writable> rdd, Class<?> valueClass, org.apache.hadoop.hive.ql.plan.FileSinkDesc fileSinkConf, org.apache.hadoop.mapred.JobConf conf, boolean isCompressed)
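`saveAsHiveFile` is an internal helper driven by `execute()`; its compression flag is read from the Hive configuration. A hedged sketch of assembling the kind of `JobConf` and `isCompressed` arguments it expects, assuming Hive's standard `hive.exec.compress.output` key (illustrative wiring, not this class's exact code):

```scala
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.mapred.JobConf

// Build a JobConf from the Hive configuration and read the standard
// output-compression switch, the kind of value that would be passed to
// saveAsHiveFile's isCompressed parameter.
val hiveConf = new HiveConf()
val jobConf = new JobConf(hiveConf)
val isCompressed = jobConf.getBoolean("hive.exec.compress.output", false)
println(s"compress output: $isCompressed")
```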