
Typical Usage and Code Examples of Java's JavaSQLContext Class


This article collects typical usage examples of the Java class org.apache.spark.sql.api.java.JavaSQLContext. If you have been wondering what JavaSQLContext is for and how to use it, the selected code examples below should help.

The JavaSQLContext class belongs to the org.apache.spark.sql.api.java package. Five code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; ratings help surface better Java code examples.
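Note that JavaSQLContext (together with JavaSchemaRDD) belongs to the pre-1.3 Spark SQL Java API; it was removed in Spark 1.3, when the DataFrame API unified the Scala and Java interfaces. All examples below share the same setup pattern; here is a minimal, self-contained sketch of it (the app name is illustrative):

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.api.java.JavaSQLContext;

public class JavaSQLContextSetup {
    public static void main(String[] args) {
        // A JavaSQLContext wraps a JavaSparkContext and adds SQL support.
        SparkConf conf = new SparkConf().setAppName("JavaSQLContextSetup");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaSQLContext sqlContext = new JavaSQLContext(sc);

        // ... register tables and run queries via sqlContext.sql(...) ...

        sc.stop();
    }
}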

Example 1: setup

Upvotes: 2

import org.apache.spark.sql.api.java.JavaSQLContext; // import the required package/class
@BeforeClass
public static void setup() {
    // Create the Spark context and the SQL context once for the whole test class.
    sc = new JavaSparkContext(conf);
    sqc = new JavaSQLContext(sc);
}
 

Author: xushjie1987 | Project: es-hadoop-v2.2.0 | Lines: 6 | Source: AbstractJavaEsSparkSQLTest.java
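A setup like Example 1 is normally paired with an @AfterClass teardown that stops the context; a minimal sketch, assuming sc and sqc are the static test-class fields used above:

@AfterClass
public static void teardown() {
    // Stop the Spark context so the next test class can create its own.
    if (sc != null) {
        sc.stop();
        sc = null;
    }
    sqc = null;
}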

Example 2: main

Upvotes: 2

import org.apache.spark.sql.api.java.JavaSQLContext; // import the required package/class
public static void main(String[] args) throws Exception {

    // Start the Apache Spark execution context.
    SparkConf sparkConf = new SparkConf().setAppName("Inferring the Schema Using Reflection");
    JavaSparkContext ctx = new JavaSparkContext(sparkConf);
    JavaSQLContext sqlCtx = new JavaSQLContext(ctx);

    System.out.println("=== Data source: RDD ===");

    // Load the data from a text file and map each line to a Person instance.
    JavaRDD<Person> people = ctx.textFile("data/people.txt").map(
            new Function<String, Person>() {
                @Override
                public Person call(String line) {
                    String[] parts = line.split(",");

                    Person person = new Person();
                    person.setName(parts[0]);
                    person.setAge(Integer.parseInt(parts[1].trim()));

                    return person;
                }
            });

    // Build the schema from the imported data via reflection and register
    // the result as a temporary table.
    JavaSchemaRDD schemaPeople = sqlCtx.applySchema(people, Person.class);
    schemaPeople.registerTempTable("people");

    // SQL can be run over RDDs that have been registered as tables.
    JavaSchemaRDD teenagers = sqlCtx.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");

    // The results of SQL queries are SchemaRDDs and support all the normal RDD operations.
    // The columns of a row in the result can be accessed by ordinal.
    List<String> teenagerNames = teenagers.map(new Function<Row, String>() {
        @Override
        public String call(Row row) {
            return "Name: " + row.getString(0);
        }
    }).collect();

    // Print the query results.
    for (String name : teenagerNames) {
        System.out.println(name);
    }

    // Stop the context.
    ctx.stop();
}
 

Author: adriannovegil | Project: apache-spark-examples | Lines: 51 | Source: J00InferringSchReflection.java
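The snippet assumes a Person JavaBean, which applySchema(people, Person.class) inspects via reflection to infer the schema. The bean is not shown above, so here is a minimal sketch of what it would look like: a serializable class with a getter and setter per column (data/people.txt would then contain comma-separated lines such as "Michael, 29"):

import java.io.Serializable;

public class Person implements Serializable {
    private String name;
    private int age;

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }

    public int getAge() { return age; }
    public void setAge(int age) { this.age = age; }
}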

Example 3: main

Upvotes: 2

import org.apache.spark.sql.api.java.JavaSQLContext; // import the required package/class
public static void main(String[] args) throws Exception {

    // Start the Apache Spark execution context.
    SparkConf sparkConf = new SparkConf().setAppName("Parquet Files");
    JavaSparkContext ctx = new JavaSparkContext(sparkConf);
    JavaSQLContext sqlCtx = new JavaSQLContext(ctx);

    System.out.println("=== Data source: RDD ===");

    // Load the data from a text file and map each line to a Person instance.
    JavaRDD<Person> people = ctx.textFile("data/people.txt").map(
            new Function<String, Person>() {
                @Override
                public Person call(String line) {
                    String[] parts = line.split(",");

                    Person person = new Person();
                    person.setName(parts[0]);
                    person.setAge(Integer.parseInt(parts[1].trim()));

                    return person;
                }
            });

    // Build the schema from the imported data via reflection.
    JavaSchemaRDD schemaPeople = sqlCtx.applySchema(people, Person.class);

    System.out.println("=== Data source: Parquet File ===");

    // JavaSchemaRDDs can be saved as Parquet files, maintaining the schema information.
    schemaPeople.saveAsParquetFile("people.parquet");

    // Read in the Parquet file created above.
    // Parquet files are self-describing, so the schema is preserved.
    // The result of loading a Parquet file is also a JavaSchemaRDD.
    JavaSchemaRDD parquetFile = sqlCtx.parquetFile("people.parquet");

    // Parquet files can also be registered as tables and then used in SQL statements.
    parquetFile.registerTempTable("parquetFile");

    JavaSchemaRDD teenagers2 = sqlCtx.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");

    List<String> teenagerNames = teenagers2.map(new Function<Row, String>() {
        @Override
        public String call(Row row) {
            return "Name: " + row.getString(0);
        }
    }).collect();

    // Print the query results.
    for (String name : teenagerNames) {
        System.out.println(name);
    }

    // Stop the context.
    ctx.stop();
}
 

Author: adriannovegil | Project: apache-spark-examples | Lines: 60 | Source: J02ParquetFiles.java
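One caveat when re-running Example 3: saveAsParquetFile fails if the output path already exists. A minimal sketch of clearing the previous output through the Hadoop FileSystem API, to be placed before the save call (path name as in the example):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Delete a previous run's output so saveAsParquetFile does not fail.
FileSystem fs = FileSystem.get(ctx.hadoopConfiguration());
Path output = new Path("people.parquet");
if (fs.exists(output)) {
    fs.delete(output, true); // true = delete recursively
}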

Example 4: SqlRunner

Upvotes: 2

import org.apache.spark.sql.api.java.JavaSQLContext; // import the required package/class
private SqlRunner(SparkConf conf) {
    // Build the Spark and SQL contexts from the supplied configuration.
    this.conf = conf;
    context = new JavaSparkContext(conf);
    sqlContext = new JavaSQLContext(context);
}
 

Author: boneill42 | Project: memnon | Lines: 6 | Source: SqlRunner.java
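A private constructor like this is usually paired with a static factory method and an entry point that runs queries. The rest of the memnon class is not shown here, so the following is only a hypothetical sketch of how such a runner might be completed (the create and run names are assumptions, not the project's actual API):

public class SqlRunner {
    private final SparkConf conf;
    private final JavaSparkContext context;
    private final JavaSQLContext sqlContext;

    private SqlRunner(SparkConf conf) {
        this.conf = conf;
        this.context = new JavaSparkContext(conf);
        this.sqlContext = new JavaSQLContext(context);
    }

    // Hypothetical factory method and query entry point.
    public static SqlRunner create(SparkConf conf) {
        return new SqlRunner(conf);
    }

    public JavaSchemaRDD run(String sql) {
        return sqlContext.sql(sql);
    }
}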

Example 5: getSchemaRDD

Upvotes: 1

import org.apache.spark.sql.api.java.JavaSQLContext; // import the required package/class
private JavaSchemaRDD getSchemaRDD(JavaSQLContext sqlContext, String command) throws ParseException, CommandException, IOException {
    CommandParser parser = new CommandParser(command);

    Search search = new Search(parser, client, logger);

    // Exactly one index and exactly one source type must be targeted.
    if (search.indices.length != 1)
        throw new InvalidParameterException(String.format("indices.length = %d", search.indices.length));

    if (search.sourceTypes.length != 1)
        throw new InvalidParameterException(String.format("sourceTypes.length = %d", search.sourceTypes.length));

    String query = search.querySearch.toString();

    // Load the matching Elasticsearch documents as a JavaSchemaRDD ("index/type" resource).
    return JavaEsSparkSQL.esRDD(sqlContext, String.format("%s/%s", search.indices[0], search.sourceTypes[0]), query);
}
 

Author: huangchen007 | Project: elasticsearch-rest-command | Lines: 16 | Source: TaskRestHandler.java
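The returned JavaSchemaRDD can then be registered and queried like the ones in the earlier examples; a minimal sketch of a hypothetical caller (the table name and level column are illustrative):

// command: a query string understood by the project's CommandParser.
JavaSchemaRDD logs = getSchemaRDD(sqlContext, command);
logs.registerTempTable("logs");
JavaSchemaRDD errors = sqlContext.sql("SELECT * FROM logs WHERE level = 'ERROR'");
System.out.println("error rows: " + errors.count());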

