public class Limit extends SparkPlan implements scala.Product, scala.Serializable
| Constructor and Description |
|---|
| `Limit(int limit, SparkPlan child, SQLContext sqlContext)` |
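For orientation: Limit is the physical operator that Spark SQL plans for a SQL `LIMIT` clause, so it is normally produced by the query planner rather than constructed by hand. A minimal sketch of how it surfaces, assuming a Spark 1.0-style setup and a registered table named `people` (both hypothetical):

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

// Hypothetical setup; the table name "people" is assumed to be registered already.
val sc = new SparkContext(new SparkConf().setAppName("limit-example").setMaster("local"))
val sqlContext = new SQLContext(sc)

// The LIMIT clause below is what the planner turns into a Limit physical operator.
val firstTen = sqlContext.sql("SELECT * FROM people LIMIT 10")
firstTen.collect().foreach(println)
```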
| Modifier and Type | Method and Description |
|---|---|
| `SparkPlan` | `child()` |
| `RDD<org.apache.spark.sql.catalyst.expressions.Row>` | `execute()` Runs this query, returning the result as an RDD. |
| `org.apache.spark.sql.catalyst.expressions.Row[]` | `executeCollect()` A custom implementation modeled after the take function on RDDs but which never runs any job locally. |
| `int` | `limit()` |
| `scala.collection.immutable.List<SQLContext>` | `otherCopyArgs()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>` | `output()` |
| `org.apache.spark.sql.catalyst.plans.physical.Partitioning` | `outputPartitioning()` |
Methods inherited from class SparkPlan: outputPartitioning, requiredChildDistribution

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan: expressions, generateSchemaString, generateSchemaString, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, schemaString, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode: apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, generateTreeString, getNodeNumbered, id, makeCopy, map, mapChildren, nextId, nodeName, numberedTreeString, sameInstance, simpleString, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren
public Limit(int limit, SparkPlan child, SQLContext sqlContext)
public int limit()
public SparkPlan child()
public scala.collection.immutable.List<SQLContext> otherCopyArgs()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides: output in class org.apache.spark.sql.catalyst.plans.QueryPlan&lt;SparkPlan&gt;
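Note that a LIMIT only truncates the row count; it never changes the schema, so output() exposes the same attributes as the child plan.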
public org.apache.spark.sql.catalyst.expressions.Row[] executeCollect()
A custom implementation modeled after the take function on RDDs but which never runs any job locally.
Overrides: executeCollect in class SparkPlan
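The strategy hinted at by that description can be sketched as follows: like RDD's take, it scans partitions in growing waves until enough rows are buffered, but every wave is submitted as a real cluster job (allowLocal = false in the Spark 1.x runJob signature), never evaluated locally in the driver. This is an illustrative sketch under those assumptions, not the actual implementation; takeNeverLocal is a hypothetical helper, not part of this class.

```scala
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD

// Sketch of a take-like collect that never runs any job locally.
// `takeNeverLocal` is a hypothetical helper, not part of the Spark API.
def takeNeverLocal[T: ClassTag](rdd: RDD[T], limit: Int): Array[T] = {
  val buf = new ArrayBuffer[T]
  val totalParts = rdd.partitions.length
  var partsScanned = 0
  var numPartsToTry = 1
  while (buf.size < limit && partsScanned < totalParts) {
    val partitions = partsScanned until math.min(partsScanned + numPartsToTry, totalParts)
    val left = limit - buf.size
    // Submit a cluster job for this wave of partitions; allowLocal = false
    // keeps even the first partition from being evaluated in the driver.
    val res = rdd.sparkContext.runJob(
      rdd,
      (it: Iterator[T]) => it.take(left).toArray,
      partitions,
      allowLocal = false)
    res.foreach(buf ++= _)
    partsScanned += partitions.size
    numPartsToTry *= 4  // widen the next wave if we still need rows
  }
  buf.take(limit).toArray
}
```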
public RDD<org.apache.spark.sql.catalyst.expressions.Row> execute()
Runs this query, returning the result as an RDD.
Specified by: execute in class SparkPlan
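At the RDD level, a distributed limit can be expressed by capping every partition at limit rows, shuffling the survivors into a single partition, and applying the cap once more. The sketch below shows that general shape only; limitRdd is a hypothetical stand-in, not the method's actual body.

```scala
import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD

// Hypothetical sketch of a distributed limit over an arbitrary RDD.
def limitRdd[T: ClassTag](child: RDD[T], limit: Int): RDD[T] = {
  child
    .mapPartitions(_.take(limit))   // no partition contributes more than `limit` rows
    .coalesce(1, shuffle = true)    // gather all candidates onto a single partition
    .mapPartitions(_.take(limit))   // enforce the global limit exactly once
}
```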
public org.apache.spark.sql.catalyst.plans.physical.Partitioning outputPartitioning()
Overrides: outputPartitioning in class SparkPlan