# "One-off record of a Python script" (original blog title — commented out so the file parses)

import pandas as pd
from pathlib import Path
import numpy as np
from tqdm import tqdm
import re
import os
import datetime
from datetime import date
import pickle
import itertools
import time
from io import StringIO
from sqlalchemy import create_engine, Column, String
import schedule



start = datetime.datetime.now()

# Root folder of the shared measurement data.
# NOTE(review): this path looks garbled — likely a UNC share (e.g.
# \\10.197.104.95\...) whose backslashes were lost when the script was
# copied to the blog. Confirm the real share path before running.
root = r'\10.197.104.956g_data6G'  # originally pointed at CAN_B; moved one level up
p = Path(root)

# Create the output/cache/log folders (idempotent thanks to exist_ok).
for folder in ('output/merge_data',
               'output/processed_data/duplicated',
               'cache',
               'log'):
    os.makedirs(folder, exist_ok=True)

# Data acquisition window: fixed start, open end (today).
st_date = date(2018, 10, 1)
ed_date = date.today()

# =============================================================================
# 55 — camera assembly (55_カメラ組立)

df = pd.DataFrame()   # merged rows accumulate here
new_file = []         # file-list cache collected during this run

process = 'camera_assy'
started_at = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
print(f'【{process}】  {started_at}')

for line_name in ['CAN_B', 'CAN_G']:
    print(line_name)
    # Point at this assembly line's folder under the root share.
    print('■対象ファイル一覧を取得')

    # Step 1: collect today's CSV files (path + modification date) into a
    # DataFrame. Two glob patterns: the per-model LOG folders and the
    # GP-KDE301GF LOG folder.
    current_files_list = []
    for pattern in (f'{line_name}/称重/GP-KDE3[A-F]*RC*/LOG/*.csv',
                    f'{line_name}/称重/GP-KDE301GF/LOG/*.csv'):
        for file_path in p.glob(pattern):
            mtime = date.fromtimestamp(os.path.getmtime(file_path))
            current_files_list.append((str(file_path), mtime))

    current_files_df = pd.DataFrame(current_files_list,
                                    columns=['filename', 'updateTime'])
    print(f'---------------->全ファイル数:{len(current_files_df)}')

    # Step 2: load the previous run's file list from the pickle cache.
    # BUG FIX: the original never assigned `last_files_list` in the
    # FileNotFoundError branch, so the first run (no cache yet) crashed
    # with a NameError when building `last_files_df`.
    try:
        with open(f'cache/{process}_file_list.pckl', mode='rb') as f:
            cache = pickle.load(f)
        last_files_list = [str(x) for x in cache]
    except FileNotFoundError:
        cache = []
        last_files_list = []
    last_files_df = pd.DataFrame(last_files_list, columns=['filename'])

    # Step 3: diff — files present today but absent from the previous run,
    # restricted to the [st_date, ed_date) modification-date window.
    # Pure-pandas replacement for the original pandasql LEFT JOIN ...
    # WHERE t2.filename IS NULL anti-join (removes the undeclared
    # `pandasql` dependency; same result set).
    is_new = ~current_files_df['filename'].isin(last_files_df['filename'])
    in_window = ((current_files_df['updateTime'] >= st_date)
                 & (current_files_df['updateTime'] < ed_date))
    diff = current_files_df.loc[is_new & in_window, 'filename'].tolist()
    print(f'----------------> 前回処理時から新たに追加されたファイル数:{len(diff)}')
    new_file_tmp = diff
    # NOTE(review): `new_file_tmp` is overwritten on each line's iteration and
    # `new_file` is never extended here — presumably handled further down the
    # file; confirm the cache update logic outside this view.
# Blog footer (commented out so the file parses):
# "Students passionate about IT are welcome to join QQ for technical discussion; QQ: 850922253"
# Original article: https://www.cnblogs.com/zhangwensi/p/13167701.html