From bed459e4c5503f4bf9fa41f632f1c38be994116e Mon Sep 17 00:00:00 2001
From: fly6516
Date: Mon, 14 Apr 2025 01:58:03 +0800
Subject: [PATCH] feat: count the number of unique hosts in the HDFS log
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Add Spark code to read the log file from HDFS
- Implement a log-line parsing function to extract the IP address
- Use RDD operations to filter and count the number of unique hosts
- Print the result and stop the SparkContext
---
 1-2.py | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 1-2.py

diff --git a/1-2.py b/1-2.py
new file mode 100644
index 0000000..12a4e42
--- /dev/null
+++ b/1-2.py
@@ -0,0 +1,32 @@
+import re
+from pyspark import SparkContext
+
+sc = SparkContext.getOrCreate()
+
+# Apache Common Log Format; group 1 captures the client host/IP.
+LOG_PATTERN = re.compile(r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s?" (\d{3}) (\S+)')
+
+
+def parse_log_line(line):
+    # Return a dict holding the client IP, or None for lines that do not match.
+    match = LOG_PATTERN.match(line)
+    if not match:
+        return None
+
+    return {
+        'ip': match.group(1)
+    }
+
+
+logFile = "hdfs://master:9000/user/root/apache.access.log.PROJECT"
+raw_logs = sc.textFile(logFile)
+
+parsed_logs = raw_logs.map(parse_log_line).filter(lambda x: x is not None)
+
+# Extract the IP addresses and count the unique hosts
+unique_hosts = parsed_logs.map(lambda log: log['ip']).distinct()
+unique_host_count = unique_hosts.count()
+
+print("Total number of unique hosts: {0}".format(unique_host_count))
+
+sc.stop()
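
As a quick sanity check before submitting the job, the regex from the patch can be exercised locally in plain Python, with no cluster needed. This is only a sketch: the sample record below is a made-up Common Log Format line, not taken from apache.access.log.PROJECT.

import re

# Same pattern as in 1-2.py; group 1 captures the client host/IP.
LOG_PATTERN = re.compile(
    r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s?" (\d{3}) (\S+)'
)

# Hypothetical Common Log Format record, for illustration only.
sample = '127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /index.html HTTP/1.0" 200 1839'

match = LOG_PATTERN.match(sample)
assert match is not None
print(match.group(1))  # prints: 127.0.0.1

On a cluster, the script itself would typically be launched with something like `spark-submit 1-2.py`, assuming the HDFS namenode at master:9000 is reachable from the Spark driver.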