Building a co-occurrence matrix in Python

For the data preprocessing step, see the earlier post on the LDA model.

Read the data

import numpy as np
import pandas as pd
from pprint import pprint
import xlrd  # read the Excel data
import re
import jieba  # jieba for Chinese word segmentation

path = r"D:1研1大四2020.3.13-国家突发卫生事件20201008lda.xlsx"  # change this to your own file path
data = xlrd.open_workbook(path)

sheet_1_by_index = data.sheet_by_index(0)  # read the first sheet
title = sheet_1_by_index.col_values(1)     # the second column (the titles)
n_of_rows = sheet_1_by_index.nrows
doc_set = []                               # empty list for the documents
for i in range(1, n_of_rows):              # read row by row, skipping the header row
    doc_set.append(title[i])
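
Note that xlrd 2.0 and later only reads the old .xls format, so data = xlrd.open_workbook(path) may fail on this .xlsx file. A minimal alternative sketch with pandas.read_excel, assuming the openpyxl package is installed and the first row of the sheet is a header:

df = pd.read_excel(path, sheet_name=0, engine="openpyxl")  # read the first sheet; needs openpyxl
doc_set = df.iloc[:, 1].astype(str).tolist()               # the second column; the header row is already skipped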

Preprocess and store the data

# load the stopword list from a file
def stopwordslist(filepath):
    with open(filepath, 'r', encoding='utf-8') as f:
        stopwords = [line.strip() for line in f.readlines()]
    return stopwords
stopwords = stopwordslist(r"D:1研1大四2020.3.13-国家突发卫生事件20201008stopwords.txt")  # change this to your own stopword file

texts = []      # the keywords of each document
word_set = []   # the keywords of each document, deduplicated
set_word = []   # the set of all keywords
stpwrdlst2 = ['', '', '', '', '', '', '', '', '三要', '二要']  # a second, hand-made stopword list: words I chose to drop myself
for doc in doc_set:
    # keep Chinese characters only
    cleaned_doc = ''.join(re.findall(r'[\u4e00-\u9fa5]', doc))
    # word segmentation
    doc_cut = jieba.lcut(cleaned_doc)
    # remove stopwords (and single-character tokens)
    text_list0 = [word for word in doc_cut if word not in stopwords and len(word) > 1]
    text_list1 = [word for word in text_list0 if word not in stpwrdlst2]

    # the final processed result for each document is stored in texts
    texts.append(text_list1)

# deduplicate the keywords within each document
for word in texts:
    word_new = list(set(word))  # remove repeated words inside one document
    word_set.append(word_new)
# collect every distinct keyword across all documents
for subword in word_set:
    for word in subword:
        if word not in set_word:
            set_word.append(word)  # all keywords that occur anywhere
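
As a quick sanity check of the aggregation step, here is what set_word ends up holding for two made-up, already-segmented titles (the sample words are hypothetical, not taken from the real data):

sample_texts = [['疫情', '防控', '工作'], ['疫情', '通报']]  # hypothetical segmented titles
sample_set_word = []
for doc_words in sample_texts:
    for word in set(doc_words):               # deduplicate within one document
        if word not in sample_set_word:
            sample_set_word.append(word)      # collect every distinct keyword
print(sample_set_word)  # contains 疫情, 防控, 工作, 通报; the order may vary because of set()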

Build the co-occurrence matrix

# initialize the matrix
def build_matrix(set_word):
    edge = len(set_word) + 1  # the matrix is (number of keywords + 1) rows by (number of keywords + 1) columns
    '''matrix = np.zeros((edge, edge), dtype=str)'''  # an alternative way to initialize
    matrix = [['' for j in range(edge)] for i in range(edge)]  # fill the matrix with empty strings
    matrix[0][1:] = np.array(set_word)
    matrix = list(map(list, zip(*matrix)))
    matrix[0][1:] = np.array(set_word)  # put the keywords into the first row and the first column
    return matrix
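
Calling build_matrix on a tiny, made-up keyword list shows the layout: a square grid whose first row and first column hold the keywords, while every other cell is still an empty string:

demo = build_matrix(['武汉', '疫情', '防控'])  # hypothetical keywords
for r in demo:
    print([str(cell) for cell in r])
# ['', '武汉', '疫情', '防控']
# ['武汉', '', '', '']
# ['疫情', '', '', '']
# ['防控', '', '', '']
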
# count the co-occurrences of every pair of keywords
def count_matrix(matrix, texts):
    for row in range(1, len(matrix)):
        # walk along the first row, skipping index 0
        for col in range(1, len(matrix)):
            # walk along the first column, skipping index 0
            # (this skips cell [0][0], which is the empty corner, not a keyword)
            if matrix[0][row] == matrix[col][0]:
                # if the row keyword and the column keyword are the same, the count is 0,
                # i.e. the diagonal of the matrix is 0
                matrix[col][row] = str(0)
            else:
                counter = 0  # initialize the counter
                for ech in texts:
                    # go through the preprocessed documents and check whether the
                    # row keyword and the column keyword appear in the same document
                    if matrix[0][row] in ech and matrix[col][0] in ech:
                        counter += 1
                    else:
                        continue
                matrix[col][row] = str(counter)
    return matrix
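
count_matrix rescans every document for every pair of keywords, which gets slow when set_word is large. As a sketch of a faster alternative (not the code used in this post), each document's keyword pairs can be counted once with collections.Counter and itertools.combinations:

from collections import Counter
from itertools import combinations

def count_pairs(texts):
    # for every unordered keyword pair, count in how many documents both words occur
    pair_counts = Counter()
    for doc_words in texts:
        for a, b in combinations(sorted(set(doc_words)), 2):
            pair_counts[(a, b)] += 1
    return pair_counts
# the value stored under a sorted keyword pair matches the count that
# count_matrix writes into the corresponding cell
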
def main():
    # print(set_word)
    # print(texts)
    matrix = build_matrix(set_word)
    matrix = count_matrix(matrix, texts)
    data1 = pd.DataFrame(matrix)
    data1.to_csv('D:/02-1python/2020.08.11-lda/1008-covid/chuli/data.csv', index=False, encoding='utf_8_sig')


main()
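
To check the result, the saved file can be read back with pandas; a small usage sketch (the path is the one written in main(); the first row and first column of the stored matrix hold the keyword labels):

check = pd.read_csv('D:/02-1python/2020.08.11-lda/1008-covid/chuli/data.csv', encoding='utf_8_sig')
print(check.shape)         # (len(set_word) + 1, len(set_word) + 1)
print(check.iloc[:5, :5])  # top-left corner: keyword labels plus co-occurrence counts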

Reference: https://www.cnblogs.com/DragonLin/p/12902420.html

Original post (in Chinese): https://www.cnblogs.com/Cookie-Jing/p/13837525.html