Linux | 安裝 lb-toolkits
最近又需要下載葵花的數據,之前分享過一次代碼。今天發現之前的環境不小心被我刪了,而運行相關的代碼需要安裝lb-toolkits這個庫,今天正好記錄了一下安裝lb-toolkits的過程。
這里安裝的版本是1.2.4,別問為什么是這個版本,因為只有安裝這個版本才成功。
- https://pypi.org/project/lb-toolkits/1.2.4/#files
本來是直接通過
pip install lb-toolkits==1.2.4
命令直接安裝的,但是發現一直報錯。
安裝過程
下面記錄一下成功安裝的過程
安裝python版本
這里先安裝python=3.11的版本
conda create -n py311 python=3.11
安裝相關依賴
由于lb-toolkits需要相關的依賴環境
| 庫名 | 版本 | 庫名 | 版本 | 庫名 | 版本 |
|---|---|---|---|---|---|
| numpy | 1.2.0 | pyhdf | 0.10.0 | h5py | 1.0.0 |
| netcdf4 | 1.0.0 | tqdm | 4.0.0 | gdal | 2.0.0 |
| pillow | 7.0.0 | paramiko | 2.10.0 | cdsapi | 0.5.0 |
所以在安裝之前先安裝他的依賴
conda install conda-forge::pyhdf h5py netcdf4 tqdm gdal pillow paramiko cdsapi
安裝lb-toolkits 1.2.4
依賴環境安裝完了,然后再使用pip 安裝具體的版本
pip install lb-toolkits==1.2.4
安裝成功了
測試腳本
運行相關腳本前需要再裝一個庫
conda install bs4
運行相關腳本,下載2023年8月和9月的數據:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Himawari-8 L1 NetCDF downloader (JAXA P-Tree FTP).

Created on Thu Apr 6 14:25:52 2023

@author: jianpu
"""
import os
import sys
import datetime
import time

from lb_toolkits.tools import ftppro
class downloadH8(object):
    """Download Himawari-8 L1 NetCDF files over FTP (JAXA P-Tree service).

    Relies on ``ftppro`` (from lb_toolkits) for the actual FTP operations and
    on the module-level constant ``FTPHOST`` for the server address.
    """

    def __init__(self, username, password):
        # ftppro is imported at module level; FTPHOST is defined below.
        self.ftp = ftppro(FTPHOST, username, password)

    def search_ahi8_l1_netcdf(self, starttime, endtime=None, pattern=None, skip=False):
        '''Search the Himawari-8 L1 NetCDF archive for files in a time range.

        Parameters
        ----------
        starttime : datetime
            Start of the requested time range.
        endtime : datetime, optional
            End of the requested time range; defaults to ``starttime``.
        pattern : list or str, optional
            Substring(s) a file name must contain to be selected.
        skip : bool, optional
            Unused here; kept for interface compatibility with callers.

        Returns
        -------
        list
            FTP paths of the matched files.
        '''
        if endtime is None:
            endtime = starttime

        downfilelist = []
        nowdate = starttime
        while nowdate <= endtime:
            # Build the remote directory: /jma/netcdf/YYYYMM/DD
            sourceRoot = os.path.join('/jma/netcdf', nowdate.strftime("%Y%m"),
                                      nowdate.strftime("%d"))
            sourceRoot = sourceRoot.replace('\\', '/')

            # List and filter the files for this day.
            filelist = self.GetFileList(starttime, endtime, sourceRoot, pattern)

            if len(filelist) == 0:
                nowdate += datetime.timedelta(days=1)
                print('未匹配當前時間【%s】的文件' % (nowdate.strftime('%Y-%m-%d')))
                continue

            nowdate += datetime.timedelta(days=1)
            downfilelist.extend(filelist)

        return downfilelist

    def GetFileList(self, starttime, endtime, srcpath, pattern=None):
        '''List remote files in ``srcpath`` whose timestamp falls in the range.

        File names are assumed to look like ``NC_H08_YYYYMMDD_HHMM_...`` so the
        date/time is parsed from fields 2 and 3 of the underscore-split name.
        '''
        downfiles = []
        srcpath = srcpath.replace('\\', '/')
        filelist = self.ftp.listdir(srcpath)
        filelist.sort()

        for filename in filelist:
            namelist = filename.split('_')
            nowdate = datetime.datetime.strptime(
                '%s %s' % (namelist[2], namelist[3]), '%Y%m%d %H%M')
            # Fixed: logical `or` instead of the accidental bitwise `|`
            # (identical result for bool operands, but short-circuits).
            if (nowdate < starttime) or (nowdate > endtime):
                continue

            downflag = True
            # A list pattern requires ALL substrings to match; a string
            # pattern requires that single substring to match.
            if pattern is not None:
                if isinstance(pattern, list):
                    for item in pattern:
                        if item in filename:
                            downflag = True
                        else:
                            downflag = False
                            break
                elif isinstance(pattern, str):
                    downflag = pattern in filename

            if downflag:
                srcname = os.path.join(srcpath, filename)
                srcname = srcname.replace('\\', '/')
                downfiles.append(srcname)

        return downfiles

    def download(self, outdir, srcfile, blocksize=1*1024, skip=False):
        """Download one file (str) or several files (list) into ``outdir``."""
        if not os.path.exists(outdir):
            os.makedirs(outdir)
            print('成功創建路徑:%s' % (outdir))

        if isinstance(srcfile, list):
            count = len(srcfile)
            for srcname in srcfile:
                count -= 1
                # count+1 so the printed counter runs len(srcfile)..1.
                self._download(outdir, srcname, blocksize=blocksize,
                               skip=skip, count=count + 1)
        elif isinstance(srcfile, str):
            self._download(outdir, srcfile, blocksize=blocksize, skip=skip)

    def _download(self, outdir, srcname, blocksize=1*1024, skip=False, count=1):
        """Fetch a single remote file; skip when requested or already present."""
        print('=' * 100)
        basename = os.path.basename(srcname)
        dstname = os.path.join(outdir, basename)

        if skip:
            return srcname
        if os.path.isfile(dstname):
            print('文件已存在,跳過下載>>【%s】' % (dstname))
            return srcname

        stime = time.time()
        print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
              '開始下載文件【%d】: %s' % (count, srcname))
        if self.ftp.downloadFile(srcname, outdir, blocksize=blocksize):
            print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
                  '成功下載文件【%s】:%s' % (count, dstname))
        else:
            print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
                  '下載文件失敗【%s】:%s' % (count, dstname))
        etime = time.time()
        print('下載文件共用%.2f秒' % (etime - stime))
        return srcname


def check_data_completeness(file_list, start_time, end_time):
    """Report whether every expected 30-minute file is present in file_list.

    Expects one file every 30 minutes (48 per day); prints the names of any
    missing ``NC_H08_..._R21_FLDK.06001_06001.nc`` files. Returns None.
    """
    # 48 files/day at 30-min cadence (144 would be the 10-min cadence).
    expected_num_files = (end_time - start_time).days * 48 + 48
    actual_num_files = len(file_list)

    if actual_num_files == expected_num_files:
        print("已經下載了全部數據。")
    else:
        print("有 %d 個數據文件缺失。" % (expected_num_files - actual_num_files))
        expected_file_names = []
        actual_file_names = []
        for i in range(expected_num_files):
            file_time = start_time + datetime.timedelta(minutes=i * 30)
            file_name = ("NC_H08_%s_R21_FLDK.06001_06001.nc"
                         % (file_time.strftime("%Y%m%d_%H%M")))
            expected_file_names.append(file_name)
        for file_path in file_list:
            actual_file_names.append(os.path.basename(file_path))
        missing_file_names = set(expected_file_names) - set(actual_file_names)
        for missing_file_name in missing_file_names:
            print("缺失文件:%s" % missing_file_name)


FTPHOST = 'ftp.ptree.jaxa.jp'
# Driver: search, filter to 30-minute files, verify completeness, download.
# create an instance of the downloadH8 class (replace 'xxx' with real credentials)
h8_downloader = downloadH8('xxx', 'xxx')

# NOTE(review): original comment "## 2016 1440\0240 loss" — meaning unclear, kept for the record.
# search for H8 files for a specific date range
start_time = datetime.datetime(2023, 8, 27)
end_time = datetime.datetime(2023, 9, 7, 23, 59, 59)
file_list = h8_downloader.search_ahi8_l1_netcdf(
    start_time, end_time, pattern=['R21', '02401_02401'])

# Keep only files on the half hour; the minute field sits at a fixed
# offset (chars 40:42) of the full FTP path — fragile, TODO confirm offset.
selected_files = []
for file in file_list:
    if file.endswith(".nc"):
        if file[40:42] in ["00", "30"]:
            selected_files.append(file)

# Show the selection, then report any gaps in the 30-minute sequence.
print(selected_files)
check_data_completeness(selected_files, start_time, end_time)

from tqdm import tqdm

for file in tqdm(selected_files):
    h8_downloader.download('/DatadiskExt/down_h8_code/', file)
使用nohup將腳本提交到后臺,
nohup python down_kuihua8_30min.py > down_H8-2023-08_09-15.log 2>&1 &
并使用tail命令查看相關下載的日志:
tail -f down_H8-2023-08_09-15.log
可以發現數據正在后臺下載