Read benchmark log files and convert them to CSV.

# coding:utf-8
# make_msg_dict.py

# Parser state for the first script (make_msg_dict.py). These module-level
# globals are initialized here and mutated by the log-reading loop below.
line_num = 0  # number of raw lines consumed from the log
new_block_flag = False  # cleared on blank lines (not otherwise read in this chunk)
second_flag = False  # True => the next INFO line is the run-configuration line
linux_resource_flag = False  # True while inside the Linux resource section
next_predict_flag = False  # True once a run's resource section has finished

second_line = ""  # raw text of the run-configuration line

threads_num = 0  # thread count of the current run (-1 for process-based runs)
process_num = 0  # process count of the current run (-1 for thread-based runs)
torch_threads_num = 0  # torch thread count parsed from the config line
img_num = 0  # image count parsed from the config line
batch_size = 0  # batch size parsed from the config line

msg_line_num = 0  # predict-message lines still expected (workers * 5)
linux_resource = 0  # resource-section lines still expected (17 per run)
all_count = 0  # NOTE(review): appears unused in this chunk

min_list = []  # per-worker min predict times (reported *1000 as Latency)
avg_list = []  # per-worker avg predict times
max_list = []  # per-worker max predict times
fps_list = []  # per-worker fps readings

all_dict = {}  # nested result: process -> threads -> batch_size -> torch_threads
msg_dict = {}  # summary dict of the run currently being finished
other_count = 0  # how many times a run summary was (re)stored

gpu = 0  # GPU utilization, formatted "NN%"
gpu_memory = 0  # GPU memory used, formatted "NN.NNMB"
cpu = 0  # CPU utilization, formatted "NN%"
cpu_memory = 0  # host memory used, formatted in MB

# --- make_msg_dict.py: parse one benchmark log ------------------------------
# Reads the log line by line, extracts per-run predict timings, fps, and
# Linux/GPU resource usage, and folds each finished run into the nested
# all_dict: {process: {threads: {batch_size: {torch_threads: summary}}}}.
log_file = "rt_t2_process_12-10_19-00.log"
with open(log_file, 'r') as f:
    for i in f:
        line_num += 1
        # A bare newline separates log blocks.
        # BUG FIX: the original literal was broken across two source lines.
        if i == "\n":
            new_block_flag = False
            continue
        try:
            # Keep only the text after the "INFO" tag.
            line_msg = i.split("INFO")[1].strip()
        except IndexError:
            # Line carries no "INFO" tag; skip it.
            continue

        # "INFO" with nothing after it.
        if not line_msg:
            continue

        # First line of a run: reset all per-run accumulators.
        if line_msg == "predict_start":
            second_flag = True
            next_predict_flag = False
            msg_dict = {}
            min_list = []
            avg_list = []
            max_list = []
            fps_list = []
            gpu = 0
            gpu_memory = 0
            cpu = 0
            cpu_memory = 0
            continue

        # Second line of a run: the configuration summary, e.g.
        # "process_num:2,torch_threads_num:4,img_num:100,batch_size:8".
        if second_flag:
            second_line = line_msg
            second_ret_list = second_line.replace(" ", '').split(",")
            judge_type = second_ret_list[0].split(":")[0]
            if judge_type == "process_num":
                threads_num = -1
                process_num = int(second_ret_list[0].split(":")[1])
                # Each worker emits 5 predict-message lines.
                msg_line_num = process_num * 5
            else:
                process_num = -1
                threads_num = int(second_ret_list[0].split(":")[1])
                msg_line_num = threads_num * 5

            torch_threads_num = int(second_ret_list[1].split(":")[1])
            img_num = int(second_ret_list[2].split(":")[1])
            batch_size = int(second_ret_list[3].split(":")[1])

            second_flag = False
            continue

        # Predict-message lines: "name: min/avg/max" timings and "xxx_fps = N".
        if msg_line_num > 0:
            msg_line_num -= 1
            if msg_line_num == 0:
                # All workers reported; the next 17 lines are resource stats.
                linux_resource_flag = True
                linux_resource = 17
            if ":" in line_msg:
                line_msg_list = line_msg.replace(" ", '').split(":")
                min_list.append(float(line_msg_list[1].split("/")[0]))
                avg_list.append(float(line_msg_list[1].split("/")[1]))
                max_list.append(float(line_msg_list[1].split("/")[2]))
                continue
            if "=" in line_msg:
                line_msg_list = line_msg.replace(" ", '').split("=")
                if line_msg_list[0][-3:] == "fps":
                    fps_list.append(float(line_msg_list[1]))
                    continue
            continue

        # Linux/GPU resource section (CPU / memory / GPU readings).
        if linux_resource > 0:
            linux_resource -= 1
            if linux_resource == 0:
                # Resource section done; the run summary can be stored.
                next_predict_flag = True
                linux_resource_flag = False

            # BUG FIX: str.strip() returns a new string; the original call
            # discarded its result.
            line_msg = line_msg.strip()
            if line_msg.startswith("cpu utilization"):
                if line_msg.count("/") == 1:
                    cpu = str(int(float(line_msg.split(" ")[2][:-1]))) + "%"
                    continue
            if line_msg.startswith("memory"):
                if line_msg.count("/") == 1:
                    # GB value scaled to MB.
                    cpu_memory = str(round(float(line_msg.split(" ")[1][:-2]), 2) * 1000) + "MB"
                    continue
            if line_msg.startswith("GPU 0 memory_used"):
                gpu_memory = str(round(float(line_msg.split(":")[1].split("/")[1].replace(" ", '')[:-1]), 2)) + "MB"
                continue
            if line_msg.startswith("GPU 0 Utilization_Rates"):
                gpu = str(int(float(line_msg.split(":")[1].split("/")[1].replace(" ", '')[:-1]))) + "%"
                continue

        # Store the finished run's summary. NOTE(review): next_predict_flag is
        # never reset here, so later stray lines re-store the same summary
        # (idempotent overwrite) and bump other_count — preserved as-is.
        if not linux_resource_flag and next_predict_flag:
            other_count += 1
            msg_dict = {
                "Throughput": round(sum(fps_list), 0) * batch_size,
                "Latency": "{} / {} / {}".format(
                    round(min(min_list) * 1000, 2),
                    round(sum(avg_list) / len(avg_list) * 1000, 2),
                    round(max(max_list) * 1000, 2),
                ),
                "gpu": gpu,
                "gpu_memory": gpu_memory,
                "cpu": cpu,
                "cpu_memory": cpu_memory,
            }
            # Nest under process=1 for thread runs, threads=1 for process runs.
            # setdefault replaces the original four-level .get() pyramid.
            if threads_num != -1:
                all_dict.setdefault(1, {}).setdefault(threads_num, {}).setdefault(batch_size, {})[torch_threads_num] = msg_dict
            else:
                all_dict.setdefault(process_num, {}).setdefault(1, {}).setdefault(batch_size, {})[torch_threads_num] = msg_dict


print("other_count", other_count)
print("all_dict", all_dict)

# --- Second script: make_csv.py ---------------------------------------------
# (Originally a separate file with its own "# coding:utf-8" header; a coding
# declaration this far into a file has no effect.)

# Parser state for the second script (make_csv.py). Identical to the first
# script's globals; re-initialized here because the two files were pasted
# together. Note line_num is NOT reset per log file below.
line_num = 0  # number of raw lines consumed across all logs
new_block_flag = False  # cleared on blank lines (not otherwise read in this chunk)
second_flag = False  # True => the next INFO line is the run-configuration line
linux_resource_flag = False  # True while inside the Linux resource section
next_predict_flag = False  # True once a run's resource section has finished

second_line = ""  # raw text of the run-configuration line

threads_num = 0  # thread count of the current run (-1 for process-based runs)
process_num = 0  # process count of the current run (-1 for thread-based runs)
torch_threads_num = 0  # torch thread count parsed from the config line
img_num = 0  # image count parsed from the config line
batch_size = 0  # batch size parsed from the config line

msg_line_num = 0  # predict-message lines still expected (workers * 5)
linux_resource = 0  # resource-section lines still expected (17 per run)
all_count = 0  # NOTE(review): appears unused in this chunk

min_list = []  # per-worker min predict times (reported *1000 as Latency)
avg_list = []  # per-worker avg predict times
max_list = []  # per-worker max predict times
fps_list = []  # per-worker fps readings

all_dict = {}  # nested result; reset at the top of each log file's loop
msg_dict = {}  # summary dict of the run currently being finished
other_count = 0  # per-file count of summary stores; reset per log file

gpu = 0  # GPU utilization, formatted "NN%"
gpu_memory = 0  # GPU memory used, formatted "NN.NNMB"
cpu = 0  # CPU utilization, formatted "NN%"
cpu_memory = 0  # host memory used, formatted in MB

# --- make_csv.py: parse every log in log_dir/ and write one CSV per log -----
# Same parsing logic as make_msg_dict.py above, then the nested all_dict is
# flattened into CSV rows: process,threads,batch_size,torch_threads,<metrics>.
import os

log_dir_list = os.listdir("log_dir")
for log_file in log_dir_list:
    all_dict = {}
    other_count = 0
    with open("log_dir/{}".format(log_file), 'r') as f:
        for i in f:
            line_num += 1
            # A bare newline separates log blocks.
            # BUG FIX: the original literal was broken across two source lines.
            if i == "\n":
                new_block_flag = False
                continue
            try:
                # Keep only the text after the "INFO" tag.
                line_msg = i.split("INFO")[1].strip()
            except IndexError:
                # Line carries no "INFO" tag; skip it.
                continue

            # "INFO" with nothing after it.
            if not line_msg:
                continue

            # First line of a run: reset all per-run accumulators.
            if line_msg == "predict_start":
                second_flag = True
                next_predict_flag = False
                msg_dict = {}
                min_list = []
                avg_list = []
                max_list = []
                fps_list = []
                gpu = 0
                gpu_memory = 0
                cpu = 0
                cpu_memory = 0
                continue

            # Second line of a run: the configuration summary.
            if second_flag:
                second_line = line_msg
                second_ret_list = second_line.replace(" ", '').split(",")
                judge_type = second_ret_list[0].split(":")[0]
                if judge_type == "process_num":
                    threads_num = -1
                    process_num = int(second_ret_list[0].split(":")[1])
                    # Each worker emits 5 predict-message lines.
                    msg_line_num = process_num * 5
                else:
                    process_num = -1
                    threads_num = int(second_ret_list[0].split(":")[1])
                    msg_line_num = threads_num * 5

                torch_threads_num = int(second_ret_list[1].split(":")[1])
                img_num = int(second_ret_list[2].split(":")[1])
                batch_size = int(second_ret_list[3].split(":")[1])

                second_flag = False
                continue

            # Predict-message lines: "name: min/avg/max" and "xxx_fps = N".
            if msg_line_num > 0:
                msg_line_num -= 1
                if msg_line_num == 0:
                    # All workers reported; next 17 lines are resource stats.
                    linux_resource_flag = True
                    linux_resource = 17
                if ":" in line_msg:
                    line_msg_list = line_msg.replace(" ", '').split(":")
                    min_list.append(float(line_msg_list[1].split("/")[0]))
                    avg_list.append(float(line_msg_list[1].split("/")[1]))
                    max_list.append(float(line_msg_list[1].split("/")[2]))
                    continue
                if "=" in line_msg:
                    line_msg_list = line_msg.replace(" ", '').split("=")
                    if line_msg_list[0][-3:] == "fps":
                        fps_list.append(float(line_msg_list[1]))
                        continue
                continue

            # Linux/GPU resource section.
            if linux_resource > 0:
                linux_resource -= 1
                if linux_resource == 0:
                    next_predict_flag = True
                    linux_resource_flag = False

                # BUG FIX: str.strip() returns a new string; the original
                # call discarded its result.
                line_msg = line_msg.strip()
                if line_msg.startswith("cpu utilization"):
                    if line_msg.count("/") == 1:
                        cpu = str(int(float(line_msg.split(" ")[2][:-1]))) + "%"
                        continue
                if line_msg.startswith("memory"):
                    if line_msg.count("/") == 1:
                        # GB value scaled to MB.
                        cpu_memory = str(round(float(line_msg.split(" ")[1][:-2]), 2) * 1000) + "MB"
                        continue
                if line_msg.startswith("GPU 0 memory_used"):
                    gpu_memory = str(round(float(line_msg.split(":")[1].split("/")[1].replace(" ", '')[:-1]), 2)) + "MB"
                    continue
                if line_msg.startswith("GPU 0 Utilization_Rates"):
                    gpu = str(int(float(line_msg.split(":")[1].split("/")[1].replace(" ", '')[:-1]))) + "%"
                    continue

            # Store the finished run's summary into all_dict.
            if not linux_resource_flag and next_predict_flag:
                other_count += 1
                msg_dict = {
                    "Throughput": round(sum(fps_list), 0) * batch_size,
                    "Latency": "{} / {} / {}".format(
                        round(min(min_list) * 1000, 2),
                        round(sum(avg_list) / len(avg_list) * 1000, 2),
                        round(max(max_list) * 1000, 2),
                    ),
                    "gpu": gpu,
                    "gpu_memory": gpu_memory,
                    "cpu": cpu,
                    "cpu_memory": cpu_memory,
                }
                # Nest under process=1 for thread runs, threads=1 for
                # process runs. setdefault replaces the .get() pyramid.
                if threads_num != -1:
                    all_dict.setdefault(1, {}).setdefault(threads_num, {}).setdefault(batch_size, {})[torch_threads_num] = msg_dict
                else:
                    all_dict.setdefault(process_num, {}).setdefault(1, {}).setdefault(batch_size, {})[torch_threads_num] = msg_dict

    print("other_count", other_count)

    # Flatten the nested dict into CSV rows. Each level saves its prefix
    # before descending and restores it afterwards.
    cvs_str = ''
    all_cvs_str = ''
    threads_count = 0
    for process, threads_num_dict in all_dict.items():
        cvs_str = str(process) + ","

        for threads_num, batch_size_dict in threads_num_dict.items():
            th_before_cvs_str = cvs_str
            cvs_str += str(threads_num) + ","

            for batch_size, torch_threads_num_dict in batch_size_dict.items():
                b_before_cvs_str = cvs_str
                cvs_str += str(batch_size) + ","
                for torch_threads_num, msg_dict in torch_threads_num_dict.items():
                    t_before_cvs_str = cvs_str
                    cvs_str += str(torch_threads_num) + ","

                    msg_str = ",".join([str(v) for v in msg_dict.values()])
                    cvs_str += msg_str
                    # BUG FIX: the original "\n" literal was broken in two.
                    all_cvs_str += cvs_str + "\n"
                    cvs_str = t_before_cvs_str

                cvs_str = b_before_cvs_str

            cvs_str = th_before_cvs_str

    # Row count for a quick sanity check.
    print(all_cvs_str.count("\n"))
    with open("{}.csv".format(log_file), 'w') as f:
        f.write(all_cvs_str)
    all_cvs_str = ""
Original article: https://www.cnblogs.com/wjw6692353/p/14118405.html