Spark integration with Hive

    1. The Hive class libraries must be present on the Spark worker nodes.
        Spark ships with the Hive class libraries by default.
    2. Copy the three files core-site.xml (HDFS), hdfs-site.xml (HDFS), and hive-site.xml (Hive)
       into spark/conf.
        cp /soft/hive/conf/hive-site.xml /soft/spark/conf/
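        # The Hadoop config paths below are an assumption (typical /soft/hadoop layout); adjust to your install.
        cp /soft/hadoop/etc/hadoop/core-site.xml /soft/spark/conf/
        cp /soft/hadoop/etc/hadoop/hdfs-site.xml /soft/spark/conf/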

    3. Copy the MySQL driver JAR (needed because the Hive metastore is backed by MySQL) into /soft/spark/jars.
        cp /soft/hive/lib/mysql-connector-java-5.1.47.jar /soft/spark/jars/
    
    4. Start spark-shell, specifying the launch mode.
        spark-shell --master local[4]        # local mode with 4 worker threads
        //create the target hive table (qualified as mydb.tt so it matches the load statement below)
        $scala>spark.sql("create table mydb.tt(id int, name string, age int) row format delimited fields terminated by ',' lines terminated by '\\n' stored as textfile");

        //load the data into the hive table
        $scala>spark.sql("load data local inpath 'file:///home/centos/data.txt' into table mydb.tt");

 

Operating on Hive tables with SparkSQL in Java

    1. Copy the configuration files into the resources directory:
        core-site.xml
        hdfs-site.xml
        hive-site.xml
    2. Add the dependencies to pom.xml:

<?xml version="1.0" encoding="UTF-8"?>
		<project xmlns="http://maven.apache.org/POM/4.0.0"
				 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
				 xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
			<modelVersion>4.0.0</modelVersion>
			<groupId>com.it18zhang</groupId>
			<artifactId>SparkDemo1</artifactId>
			<version>1.0-SNAPSHOT</version>
			<build>
				<sourceDirectory>src/main/java</sourceDirectory>
				<plugins>
					<plugin>
						<groupId>org.apache.maven.plugins</groupId>
						<artifactId>maven-compiler-plugin</artifactId>
						<configuration>
							<source>1.8</source>
							<target>1.8</target>
						</configuration>
					</plugin>
					<plugin>
						<groupId>net.alchim31.maven</groupId>
						<artifactId>scala-maven-plugin</artifactId>
						<version>3.2.2</version>
						<configuration>
							<recompileMode>incremental</recompileMode>
						</configuration>
						<executions>
							<execution>
								<goals>
									<goal>compile</goal>
									<goal>testCompile</goal>
								</goals>
							</execution>
						</executions>
					</plugin>
				</plugins>
			</build>
			<dependencies>
				<dependency>
					<groupId>org.apache.spark</groupId>
					<artifactId>spark-core_2.11</artifactId>
					<version>2.1.0</version>
				</dependency>
				<dependency>
					<groupId>org.apache.spark</groupId>
					<artifactId>spark-mllib_2.11</artifactId>
					<version>2.1.0</version>
				</dependency>
				<dependency>
					<groupId>mysql</groupId>
					<artifactId>mysql-connector-java</artifactId>
					<version>5.1.17</version>
				</dependency>
				<dependency>
					<groupId>org.apache.spark</groupId>
					<artifactId>spark-sql_2.11</artifactId>
					<version>2.1.0</version>
				</dependency>
				<!-- required for SparkSession.enableHiveSupport() below -->
				<dependency>
					<groupId>org.apache.spark</groupId>
					<artifactId>spark-hive_2.11</artifactId>
					<version>2.1.0</version>
				</dependency>
			</dependencies>
		</project>

Class:

package com.mao.scala.java;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

/**
 * Operating on a Hive table through SparkSQL.
 */
public class SQLHiveJava {
    public static void main(String[] args) {
        // enableHiveSupport() is required; without it Spark uses its
        // in-memory catalog and the DDL never reaches the Hive metastore.
        SparkSession sess = SparkSession.builder()
                            .appName("HiveSQLJava")
                            .config("spark.master", "local")
                            .enableHiveSupport()
                            .getOrCreate();

        // DDL returns an empty Dataset, so there is nothing useful to show from it.
        sess.sql("create table tttt(id int, name string, age int)");

        // Verify that the table is now registered in the metastore.
        Dataset<Row> df = sess.sql("show tables");
        df.show();
    }
}
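
A minimal follow-up sketch (not from the original post): with Hive support enabled, the same data file used in the spark-shell steps can be loaded and queried from Java. The class name SQLHiveLoadJava and the reuse of data.txt/tttt are assumptions for illustration.

package com.mao.scala.java;

import org.apache.spark.sql.SparkSession;

/**
 * Hypothetical follow-up: load the sample data file into the Hive table
 * created by SQLHiveJava, then read the rows back.
 */
public class SQLHiveLoadJava {
    public static void main(String[] args) {
        SparkSession sess = SparkSession.builder()
                .appName("HiveSQLLoadJava")
                .config("spark.master", "local")
                .enableHiveSupport()    // route SQL through the Hive metastore
                .getOrCreate();

        // Load the local file (same sample file as the spark-shell example above).
        sess.sql("load data local inpath 'file:///home/centos/data.txt' into table tttt");

        // Query the table back through Hive.
        sess.sql("select * from tttt").show();

        sess.stop();
    }
}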

 
