feat: plot a line chart of daily 404 response code records
- Add 2-7.py, implementing log parsing and statistics
- Use a Spark cluster to process large-scale log data
- Extract the daily 404 error counts and plot them as a line chart with Matplotlib
- Parse the logs with a regular expression and filter out the entries with a 404 status code
- Count the 404 errors by date and sort the results
- Finally, display the line chart to show the daily trend of 404 errors at a glance
This commit is contained in:
parent ede4f6c21f
commit fbb71a0e8e
68  2-7.py  Normal file
@@ -0,0 +1,68 @@
import re

from pyspark import SparkContext

# Initialize the SparkContext
sc = SparkContext.getOrCreate()

# Regular expression for matching log lines (Apache Common Log Format)
LOG_PATTERN = re.compile(
    r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s?" (\d{3}) (\S+)'
)

# Parse a single log line into a dict, or None if it does not match
def parse_log_line(line):
    match = LOG_PATTERN.match(line)
    if not match:
        return None

    content_size_str = match.group(9)
    content_size = int(content_size_str) if content_size_str.isdigit() else 0

    return {
        'ip': match.group(1),
        'user_identity': match.group(2),
        'user_id': match.group(3),
        'timestamp': match.group(4),
        'method': match.group(5),
        'endpoint': match.group(6),
        'protocol': match.group(7),
        'status_code': int(match.group(8)),
        'content_size': content_size
    }

def extract_day(log):
    # Timestamp format: 10/Oct/2000:13:55:36 -0700
    full_date = log['timestamp']
    day = full_date.split('/')[0]  # keep only the day of month
    return day

if __name__ == "__main__":
    # Load the log file
    logFile = "hdfs://master:9000/user/root/apache.access.log.PROJECT"
    raw_logs = sc.textFile(logFile)

    # Parse the lines and keep only valid log entries
    access_logs = raw_logs.map(parse_log_line).filter(lambda x: x is not None).cache()

    # Keep only entries with a 404 status code
    error_404_logs = access_logs.filter(lambda log: log['status_code'] == 404).cache()

    # Count 404 errors per day
    errDateSorted = (
        error_404_logs
        .map(lambda log: (extract_day(log), 1))
        .reduceByKey(lambda a, b: a + b)
        .sortBy(lambda x: x[1], ascending=False)  # sort by count, descending
        .cache()
    )

    # Take the five days with the most 404 errors
    top_5_days = errDateSorted.take(5)

    # Print the top five days and their 404 counts
    print("Top five days by number of 404 errors:")
    for i, (day, count) in enumerate(top_5_days):
        print(f"No. {i + 1}: day {day} => {count} 404 errors")

    # Stop Spark
    sc.stop()
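Since the regex is strict about the request field, a quick local sanity check (with the definitions from 2-7.py in scope) helps confirm that the parser accepts a typical Common Log Format line. The sample line below is hypothetical, not taken from apache.access.log.PROJECT:

# A hypothetical sample line in Apache Common Log Format, used only to
# exercise the regex; it is not taken from the project's log file.
sample = '127.0.0.1 - - [10/Oct/2000:13:55:36 -0700] "GET /missing.html HTTP/1.0" 404 209'
parsed = parse_log_line(sample)
assert parsed is not None
assert parsed['status_code'] == 404
assert parsed['endpoint'] == '/missing.html'
assert extract_day(parsed) == '10'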
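One caveat on extract_day: because it keeps only the day-of-month token, 404 counts for the same day number in different months (e.g. 10/Oct and 10/Nov) would be merged. If the dataset spanned multiple months, a variant keyed on the full date could be used instead; the helper below is a hypothetical alternative, not part of this commit:

# Hypothetical variant: key on the full day/month/year prefix so that
# dates from different months stay separate.
def extract_date(log):
    # '10/Oct/2000:13:55:36 -0700' -> '10/Oct/2000'
    return log['timestamp'].split(':')[0]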
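The commit message mentions drawing the line chart with Matplotlib, but the diff itself stops at printing the top five days. A minimal sketch of that step, assuming it runs in the same job after errDateSorted is built and before sc.stop(); re-sorting chronologically by the day key is an assumption here, since a line chart needs the x-axis in day order rather than count order:

import matplotlib.pyplot as plt

# Collect the (day, count) pairs on the driver and re-sort them by the
# day-of-month key; sorting with int() assumes keys like '10', '22', etc.
daily_counts = sorted(errDateSorted.collect(), key=lambda x: int(x[0]))
days = [day for day, _ in daily_counts]
counts = [count for _, count in daily_counts]

plt.plot(days, counts, marker='o')
plt.xlabel('Day')
plt.ylabel('Number of 404 responses')
plt.title('Daily 404 response counts')
plt.show()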