Prerequisites: the PySpark driver runs on Kubernetes, and the connector jars for the target object storage are already on the classpath (via environment variables or under $SPARK_HOME/jars). If you hit a missing-class/package error, add the corresponding jar there.
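If a jar is missing you can also supply it explicitly, either with spark-submit --jars or from code when the session is built. A minimal sketch of the code route, assuming a hadoop-aliyun based OSS connector (the jar paths and versions below are placeholders, not taken from the original setup); the walkthrough that follows assumes the jars are already in place and simply calls getOrCreate():

from pyspark.sql import SparkSession

# Hypothetical jar locations - adjust to the connector shipped with your image
oss_jars = ",".join([
    "/opt/spark/jars/hadoop-aliyun-3.3.4.jar",
    "/opt/spark/jars/aliyun-sdk-oss-3.13.1.jar",
])
spark = (
    SparkSession.builder
    .config("spark.jars", oss_jars)
    .getOrCreate()
)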
from py4j.java_gateway import java_import
from pyspark.sql import SparkSession

# ------------------------------------------------------------------------------
# 1) Start / get the SparkSession
# ------------------------------------------------------------------------------
spark = (
    SparkSession.builder
    .enableHiveSupport()  # keep if you need Hive; remove otherwise
    .getOrCreate()
)
hconf = spark._jsc.hadoopConfiguration()
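If the Kubernetes image does not already inject the OSS endpoint and credentials, they can be set on this Hadoop configuration before the first FileSystem call. The keys below are the hadoop-aliyun ones and the values are placeholders; a different OSS connector (e.g. JindoSDK) uses different keys:

# Assumed hadoop-aliyun settings with placeholder values - not from the original environment
hconf.set("fs.oss.impl", "org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem")
hconf.set("fs.oss.endpoint", "oss-cn-shanghai-internal.aliyuncs.com")
hconf.set("fs.oss.accessKeyId", "<ACCESS_KEY_ID>")
hconf.set("fs.oss.accessKeySecret", "<ACCESS_KEY_SECRET>")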
# ------------------------------------------------------------------------------
# 2) Import the Hadoop Java classes
# ------------------------------------------------------------------------------
java_import(spark._jvm, "org.apache.hadoop.fs.FileSystem")
java_import(spark._jvm, "org.apache.hadoop.fs.Path")# ------------------------------------------------------------------------------
# 3) Define the source / destination paths
# ------------------------------------------------------------------------------
src_path = spark._jvm.Path("file:///opt/decom.sh")
# The destination can simply be a directory; FileSystem keeps the original file name
dst_dir = spark._jvm.Path("oss://aysh-s-data/tmp1/")

# ------------------------------------------------------------------------------
# 4) Get the FileSystem instance for the *destination* (OSS here)
# ------------------------------------------------------------------------------
dst_fs = spark._jvm.org.apache.hadoop.fs.FileSystem.get(dst_dir.toUri(), hconf)

# Create the destination directory first if it does not exist
if not dst_fs.exists(dst_dir):
    dst_fs.mkdirs(dst_dir)

# ------------------------------------------------------------------------------
# 5) Perform the copy
#    Signature: copyFromLocalFile(delSrc, overwrite, src, dst)
#    Here delSrc=False and overwrite=False (set overwrite to True to replace an existing file)
#    When the destination is a directory, the original file name is kept
# ------------------------------------------------------------------------------
dst_fs.copyFromLocalFile(False, False, src_path, dst_dir)

print("Done! file:///opt/decom.sh -> oss://aysh-s-data/tmp1/")
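As an optional sanity check (not part of the original script), the same FileSystem handle can list the destination directory to confirm the file arrived:

# Optional: list the destination directory and print each entry's path and size
for status in dst_fs.listStatus(dst_dir):
    print(status.getPath().toString(), status.getLen(), "bytes")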