feat(code): add Apriori and FP-Growth algorithm implementations
- Add an implementation of the Apriori algorithm for mining association rules
- Add an implementation of the FP-Growth algorithm for mining frequent itemsets
- Add the corresponding data preprocessing and result-saving code
- Restructure the code to improve readability and maintainability
commit 6e9c2a5f91
code/5-6_cal_apriori.py (new file, 24 lines)
@@ -0,0 +1,24 @@
#-*- coding: utf-8 -*-
# Mine association rules from dish-order data with the Apriori algorithm
from __future__ import print_function
import pandas as pd
from apriori import *  # import the self-written apriori module

inputfile = '../data/menu_orders.xls'
outputfile = '../tmp/apriori_rules.xlsx'  # result file, kept in .xlsx format
data = pd.read_excel(inputfile, header=None)

print(u'\nConverting the raw data to a 0-1 matrix...')
ct = lambda x: pd.Series(1, index=x[pd.notnull(x)])  # helper that turns one order into a 0-1 row
b = map(ct, data.iloc[:, :].values)  # apply it to every order via map
data = pd.DataFrame(list(b)).fillna(0)  # assemble the matrix, filling missing items with 0
print(u'\nConversion finished.')
del b  # drop the intermediate variable to save memory

support = 0.2  # minimum support
confidence = 0.5  # minimum confidence
ms = '---'  # item separator (the function default is '--'), used to join items as in 'A---B'; it must not occur in the raw data

# The .xlsx output requires the openpyxl library.
# If it is missing, install it with: pip install openpyxl
find_rule(data, support, confidence, ms).to_excel(outputfile, engine='openpyxl')  # save the result via engine='openpyxl'
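For reference, a minimal sketch of what the 0-1 conversion above produces, using two made-up orders (the real input lives in menu_orders.xls):

import pandas as pd

raw = pd.DataFrame([['a', 'c', 'e'], ['b', 'd', None]])  # hypothetical toy orders
ct = lambda x: pd.Series(1, index=x[pd.notnull(x)])
matrix = pd.DataFrame(list(map(ct, raw.values))).fillna(0)
print(matrix)
#      a    c    e    b    d
# 0  1.0  1.0  1.0  0.0  0.0
# 1  0.0  0.0  0.0  1.0  1.0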
code/5-6_cal_fpgrowth.py (new file, 41 lines)
@@ -0,0 +1,41 @@
#-*- coding: utf-8 -*-
# Mine frequent itemsets from dish-order data with the FP-Growth algorithm
from __future__ import print_function
import pandas as pd
from fpgrowth import find_frequent_itemsets  # import the FP-Growth function

inputfile = '../data/menu_orders.xls'
outputfile = '../tmp/fpgrowth_rules.xlsx'  # result file, kept in .xlsx format
data = pd.read_excel(inputfile, header=None)

print(u'\nConverting the raw data to a 0-1 matrix...')
ct = lambda x: pd.Series(1, index=x[pd.notnull(x)])  # helper that turns one order into a 0-1 row
b = map(ct, data.iloc[:, :].values)  # apply it to every order via map
data = pd.DataFrame(list(b)).fillna(0)  # assemble the matrix, filling missing items with 0
print(u'\nConversion finished.')
del b  # drop the intermediate variable to save memory

# Convert the 0-1 matrix into a list of transactions
transactions = []
for _, row in data.iterrows():
    transactions.append(list(row[row == 1].index))

min_support = 0.2  # minimum support, as a fraction
min_support_count = int(min_support * len(transactions))  # absolute support count, e.g. 0.2 * 10 orders -> 2

# Mine the frequent itemsets with FP-Growth
frequent_itemsets = find_frequent_itemsets(transactions, min_support_count)

# Normalise frequent_itemsets to a list of lists (sorted for stable output)
frequent_itemsets = [sorted(itemset) for itemset in frequent_itemsets]

# Collect the results into a DataFrame, one row per frequent itemset
result_data = []
for itemset in frequent_itemsets:
    result_data.append({'Frequent Itemsets': ', '.join(itemset)})  # render each itemset as a string

result = pd.DataFrame(result_data)
result.to_excel(outputfile, engine='openpyxl')  # save the result via engine='openpyxl'

print(u'\nFP-Growth finished; results saved to:', outputfile)
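For reference, a quick smoke test of find_frequent_itemsets with made-up orders and an absolute support count of 2 (the driver above derives that count from the 0.2 fraction):

from fpgrowth import find_frequent_itemsets

transactions = [['a', 'b'], ['b', 'c'], ['a', 'b', 'c'], ['b']]  # hypothetical toy orders
for itemset in find_frequent_itemsets(transactions, 2):
    print(sorted(itemset))
# prints, in some order: ['a'], ['b'], ['c'], ['a', 'b'], ['b', 'c']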
code/apriori.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd


# Custom join: builds the candidate set C_k from the frequent set L_{k-1}
def connect_string(x, ms):
    x = list(map(lambda i: sorted(i.split(ms)), x))
    l = len(x[0])
    r = []
    for i in range(len(x)):
        for j in range(i, len(x)):
            # join two itemsets only if they agree on the first l-1 items and differ in the last one
            if x[i][:l - 1] == x[j][:l - 1] and x[i][l - 1] != x[j][l - 1]:
                r.append(x[i][:l - 1] + sorted([x[j][l - 1], x[i][l - 1]]))
    return r
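# A worked example (illustrative, not part of the original code):
# connect_string(['A--B', 'A--C', 'B--C'], '--') returns [['A', 'B', 'C']],
# since only 'A--B' and 'A--C' agree on their first l-1 = 1 items.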
# Search for association rules
def find_rule(d, support, confidence, ms=u'--'):
    result = pd.DataFrame(columns=['support', 'confidence'])  # output frame, one row per rule

    support_series = 1.0 * d.sum() / len(d)  # support of every single item
    column = list(support_series[support_series > support].index)  # first screening by support
    k = 0

    while len(column) > 1:
        k = k + 1
        print(u'\nRunning search pass %s...' % k)
        column = connect_string(column, ms)
        print(u'Candidate count: %s...' % len(column))
        sf = lambda i: d[i].prod(axis=1, numeric_only=True)  # support computation for the new candidates

        # Build the joined data. This is the most time- and memory-consuming step;
        # for large data sets, consider parallelising it.
        d_2 = pd.DataFrame(list(map(sf, column)), index=[ms.join(i) for i in column]).T

        support_series_2 = 1.0 * d_2[[ms.join(i) for i in column]].sum() / len(d)  # support after the join
        column = list(support_series_2[support_series_2 > support].index)  # screen by support again
        support_series = pd.concat([support_series, support_series_2])
        column2 = []

        for i in column:  # enumerate the candidate rules, e.g. does {A,B,C} mean A+B-->C, B+C-->A or C+A-->B?
            i = i.split(ms)
            for j in range(len(i)):
                column2.append(i[:j] + i[j + 1:] + i[j:j + 1])

        confidence_series = pd.Series(index=[ms.join(i) for i in column2], dtype='float64')  # confidence series

        for i in column2:  # compute the confidence of every candidate rule
            confidence_series[ms.join(i)] = support_series[ms.join(sorted(i))] / support_series[ms.join(i[:len(i) - 1])]

        for i in confidence_series[confidence_series > confidence].index:  # screen by confidence
            result.loc[i, 'confidence'] = confidence_series[i]  # record the confidence via .loc
            result.loc[i, 'support'] = support_series[ms.join(sorted(i.split(ms)))]  # record the support via .loc

    result = result.sort_values(['confidence', 'support'], ascending=False)  # tidy up and output
    print(u'\nResult:')
    print(result)

    return result
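A minimal sketch of how the returned frame reads, assuming the 0-1 DataFrame data built by the driver script above:

rules = find_rule(data, 0.2, 0.5, '---')
# rules is indexed by the joined rule string: a row labelled 'a---b'
# is the rule a --> b, with its 'support' and 'confidence' as columns.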
code/fpgrowth.py (new file, 104 lines)
@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict


class FPNode:
    def __init__(self, item=None, count=0, parent=None):
        self.item = item
        self.count = count
        self.parent = parent
        self.children = {}
        self.next = None  # link to the next node holding the same item (header-table chain)


def build_fp_tree(data, min_support):
    # Build the FP-tree; min_support is an absolute support count
    header_table = defaultdict(int)
    for transaction in data:
        for item in transaction:
            header_table[item] += 1

    # Drop the items that do not reach the minimum support
    header_table = {k: v for k, v in header_table.items() if v >= min_support}
    if not header_table:
        return None, None

    # Initialise the header table as {item: [count, head of node chain]}
    for k in header_table:
        header_table[k] = [header_table[k], None]

    root = FPNode()
    for transaction in data:
        filtered_items = [item for item in transaction if item in header_table]
        if filtered_items:
            filtered_items.sort(key=lambda x: header_table[x][0], reverse=True)  # most frequent first
            update_fp_tree(filtered_items, root, header_table)
    return root, header_table
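# Illustrative only: for transactions [['a', 'b'], ['b', 'c'], ['a', 'b', 'c'], ['b']]
# and min_support 2, each transaction is reordered by item frequency ('b' first),
# giving the tree root -> b(4) -> a(2) -> c(1) plus b(4) -> c(1), with the
# header table chaining together all nodes that hold the same item.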
def update_fp_tree(items, node, header_table):
    # Insert one frequency-ordered transaction into the tree
    if items[0] in node.children:
        node.children[items[0]].count += 1
    else:
        new_node = FPNode(item=items[0], count=1, parent=node)
        node.children[items[0]] = new_node
        update_header_table(header_table, items[0], new_node)
    if len(items) > 1:
        update_fp_tree(items[1:], node.children[items[0]], header_table)


def update_header_table(header_table, item, target_node):
    # Append the new node to the item's chain in the header table
    if header_table[item][1] is None:
        header_table[item][1] = target_node
    else:
        current = header_table[item][1]
        while current.next:
            current = current.next
        current.next = target_node


def mine_fp_tree(header_table, prefix, min_support, frequent_itemsets):
    # Mine the frequent itemsets out of the FP-tree, least frequent item first
    sorted_items = [item[0] for item in sorted(header_table.items(), key=lambda x: x[1][0])]
    for item in sorted_items:
        new_prefix = prefix.copy()
        new_prefix.add(item)
        frequent_itemsets.append(new_prefix)
        conditional_pattern_bases = find_prefix_paths(item, header_table)
        conditional_fp_tree, conditional_header_table = build_fp_tree(conditional_pattern_bases, min_support)
        if conditional_header_table:
            mine_fp_tree(conditional_header_table, new_prefix, min_support, frequent_itemsets)


def find_prefix_paths(base_item, header_table):
    # Collect the conditional pattern base of base_item; each prefix path is
    # repeated node.count times so the conditional tree counts supports correctly
    paths = []
    node = header_table[base_item][1]
    while node:
        path = []
        ascend_tree(node, path)
        if path:
            paths.extend([path] * node.count)
        node = node.next
    return paths
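# Illustrative only: in the toy tree above, item 'c' has two nodes with prefix
# paths ['a', 'b'] and ['b'] (count 1 each), so its conditional pattern base is
# [['a', 'b'], ['b']]; the tree rebuilt from it keeps only 'b' (count 2),
# which yields the frequent itemset {'b', 'c'}.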
def ascend_tree(node, path):
    # Walk from a node up to the root, collecting the items along the way
    while node.parent and node.parent.item:
        path.append(node.parent.item)
        node = node.parent


def find_frequent_itemsets(data, min_support):
    # Entry point: mine the frequent itemsets with FP-Growth
    root, header_table = build_fp_tree(data, min_support)
    if not root:
        return []
    frequent_itemsets = []
    mine_fp_tree(header_table, set(), min_support, frequent_itemsets)
    return frequent_itemsets
data/menu_orders.xls (new binary file, not shown)
tmp/apriori_rules.xls (new binary file, not shown)
tmp/apriori_rules.xlsx (new binary file, not shown)
tmp/fpgrowth_rules.xlsx (new binary file, not shown)