Big Data Testing: Splitting Table Data with Spark
# coding:utf-8
from pyspark.sql import SparkSession
import os

if __name__ == '__main__':
    # Point Spark at the local JDK; a raw string keeps the backslashes from being treated as escapes.
    os.environ['JAVA_HOME'] = r'C:\Program Files\Java\jdk1.8.0_211'
    print(os.path)

    # Build (or reuse) a local SparkSession.
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL basic example") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()

    # Read the source file as a DataFrame, using the first row as the header.
    path = "C:/Users/Administrator/Desktop/vedios.txt"
    df = spark.read.csv(path, header=True)

    # Register the DataFrame as a temporary view so it can be queried with SQL.
    df.createOrReplaceTempView("movies")

    # Split the '#'-separated category column and explode each value into its own row.
    df = spark.sql("select * from movies lateral view explode(split(category,'#')) as type1")
    df.show()
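
The same split-and-explode step can also be written with the DataFrame API instead of SQL. Below is a minimal sketch assuming the same input file with a '#'-separated "category" column; the variable names are illustrative, not from the original script.

    # coding:utf-8
    from pyspark.sql import SparkSession
    from pyspark.sql.functions import explode, split

    spark = SparkSession.builder.appName("explode example").getOrCreate()

    # Read the same file (path and header handling as in the SQL version above).
    movies = spark.read.csv("C:/Users/Administrator/Desktop/vedios.txt", header=True)

    # split() turns the '#'-separated string into an array;
    # explode() then produces one output row per array element.
    result = movies.withColumn("type1", explode(split(movies["category"], "#")))
    result.show()

Either way, each original row is repeated once per category value, with the single value exposed in the new type1 column.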