
Commit 5293e3b

committed Dec 18, 2014
project init
0 parents  commit 5293e3b

31 files changed: +1231 −0 lines changed
 

‎.classpath

+27
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
  <classpathentry kind="src" output="target/classes" path="src/main/java">
    <attributes>
      <attribute name="optional" value="true"/>
      <attribute name="maven.pomderived" value="true"/>
    </attributes>
  </classpathentry>
  <classpathentry kind="src" output="target/test-classes" path="src/test/java">
    <attributes>
      <attribute name="optional" value="true"/>
      <attribute name="maven.pomderived" value="true"/>
    </attributes>
  </classpathentry>
  <classpathentry including="**/*.java" kind="src" path="src/main/resources"/>
  <classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
    <attributes>
      <attribute name="maven.pomderived" value="true"/>
    </attributes>
  </classpathentry>
  <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6">
    <attributes>
      <attribute name="maven.pomderived" value="true"/>
    </attributes>
  </classpathentry>
  <classpathentry kind="output" path="target/classes"/>
</classpath>

‎.project

+23
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
  <name>logcount</name>
  <comment></comment>
  <projects>
  </projects>
  <buildSpec>
    <buildCommand>
      <name>org.eclipse.jdt.core.javabuilder</name>
      <arguments>
      </arguments>
    </buildCommand>
    <buildCommand>
      <name>org.eclipse.m2e.core.maven2Builder</name>
      <arguments>
      </arguments>
    </buildCommand>
  </buildSpec>
  <natures>
    <nature>org.eclipse.jdt.core.javanature</nature>
    <nature>org.eclipse.m2e.core.maven2Nature</nature>
  </natures>
</projectDescription>

.settings/org.eclipse.core.resources.prefs

+5

eclipse.preferences.version=1
encoding//src/main/java=UTF-8
encoding//src/main/resources=UTF-8
encoding//src/test/java=UTF-8
encoding/<project>=UTF-8

‎.settings/org.eclipse.jdt.core.prefs

+5
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
org.eclipse.jdt.core.compiler.compliance=1.6
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.6

‎.settings/org.eclipse.m2e.core.prefs

+4
activeProfiles=
eclipse.preferences.version=1
resolveWorkspaceProjects=true
version=1

‎README.md

+100
# Log Analysis System

## Architecture

This project uses Kafka, Spark, and HBase to build a log analysis system.

### Components

Kafka: the messaging system for log events, providing distributed, partitioned, and replicated message delivery.
Spark: uses Spark Streaming to analyze the data in the messaging system in real time and perform the computation and analysis.
HBase: the backend store; it holds the results of the Spark computations so that other systems can query them.

## Deployment

### Software versions

Hadoop stack: ZooKeeper, Hadoop, HBase and related software come from Cloudera CDH 5.2.0.
Kafka: 2.9.2-0.8.1.1

### Installation

a. Deploy Kafka

    tar -xzf kafka_2.9.2-0.8.1.1.tgz

b. Edit the Kafka configuration files (each broker needs its own id, port, and log directory)

    config/server-1.properties:
        broker.id=0
        port=9093
        log.dir=/tmp/kafka-logs-1

    config/server-2.properties:
        broker.id=1
        port=9094
        log.dir=/tmp/kafka-logs-2

    config/server-3.properties:
        broker.id=2
        port=9095
        log.dir=/tmp/kafka-logs-3

c. Start Kafka

    bin/kafka-server-start.sh config/server-1.properties &
    bin/kafka-server-start.sh config/server-2.properties &
    bin/kafka-server-start.sh config/server-3.properties &

d. Create the Kafka topic

> bin/kafka-topics.sh --create --zookeeper 10.10.102.191:2181,10.10.102.192:2181,10.10.102.193:2181 --replication-factor 3 --partitions 1 --topic recsys

e. Check that the topic was created successfully

> bin/kafka-topics.sh --list --zookeeper localhost:2181

> bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-replicated-topic
Topic:my-replicated-topic PartitionCount:1 ReplicationFactor:3 Configs:
Topic: my-replicated-topic Partition: 0 Leader: 1 Replicas: 1,2,0 Isr: 1,2,0

f. Smoke-test Kafka

> bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
This is a message
This is another message

> bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning
This is a message
This is another message

g. Notes

When writing producer clients, the broker host name mappings must be configured on the client machine, even if the code itself only uses IP addresses.
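This is because a 0.8 broker typically advertises itself to clients by host name in the topic metadata it returns, so the producer machine must be able to resolve those names. A hypothetical /etc/hosts entry on the producer machine (the host names here are placeholders, not taken from this repository):

    10.10.102.191   kafka-broker-1
    10.10.102.192   kafka-broker-2
    10.10.102.193   kafka-broker-3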
## Development

### Kafka

Jar dependencies for the client, placed in the libs directory:
- Logback: logback-classic-1.1.2.jar, logback-core-1.1.2.jar
- Kafka jars (from the lib directory of the Kafka distribution)

Configuration files for the client, placed in the conf directory:
- Logback: logback.xml

### Spark Streaming: processing the data
### HBase: storing the data

Create the HBase table:

    create 'recsys_logs', 'f'

Server-side deployment: the server starts an HTTP server that serves HTML pages packaged inside the jar, so unpack the jar first and then run the program.

    jar xvf recsys-1.0.jar
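The counts stored in recsys_logs are meant to be queried by other systems. As a minimal sketch of such a consumer, using the same HBase 0.98 client API that the project code uses (the package and class name here are hypothetical and not part of this commit):

    package com.wankun.logcount.examples;  // hypothetical package, for illustration only

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadCounts {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
            HConnection connection = HConnectionManager.createConnection(conf);
            HTableInterface table = connection.getTable("recsys_logs");
            try {
                Scan scan = new Scan();
                scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("nums"));
                ResultScanner scanner = table.getScanner(scan);
                for (Result res : scanner) {
                    // The row key written by LogStream is the "yyyyMMdd HH:mm:ss" timestamp of a batch,
                    // and f:nums holds the count serialized with Bytes.toBytes(long).
                    String ts = Bytes.toString(res.getRow());
                    long nums = Bytes.toLong(res.getValue(Bytes.toBytes("f"), Bytes.toBytes("nums")));
                    System.out.println(ts + " : " + nums);
                }
                scanner.close();
            } finally {
                table.close();
                connection.close();
            }
        }
    }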

‎pom.xml

+87
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.wankun</groupId>
  <artifactId>logcount</artifactId>
  <version>1.0</version>
  <packaging>jar</packaging>

  <name>logcount</name>
  <url>http://maven.apache.org</url>

  <repositories>
    <repository>
      <id>cloudera</id>
      <url>https://repository.cloudera.com/cloudera/cloudera-repos</url>
    </repository>
  </repositories>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <spark.version>1.1.0-cdh5.2.0</spark.version>
    <hbase.version>0.98.6-cdh5.2.0</hbase.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>jdk.tools</groupId>
      <artifactId>jdk.tools</artifactId>
      <version>1.7.0_51</version>
      <scope>system</scope>
      <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
    </dependency>
    <dependency>
      <groupId>ch.qos.logback</groupId>
      <artifactId>logback-classic</artifactId>
      <version>1.1.2</version>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.10</artifactId>
      <version>0.8.1.1</version>
    </dependency>

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming_2.10</artifactId>
      <version>${spark.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming-kafka_2.10</artifactId>
      <version>${spark.version}</version>
    </dependency>

    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
      <version>${hbase.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-server</artifactId>
      <version>${hbase.version}</version>
    </dependency>

    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.1</version>
        <configuration>
          <source>1.6</source>
          <target>1.6</target>
          <encoding>UTF-8</encoding>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>

src/main/java/com/wankun/logcount/kafka/MsgSender.java

+54

package com.wankun.logcount.kafka;

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MsgSender extends Thread {
    private final static Logger logger = LoggerFactory.getLogger(MsgSender.class);

    private SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd HH:mm:ss");

    private BlockingQueue<String> queue;
    private Producer<String, String> producer;

    public MsgSender(BlockingQueue<String> queue) {
        this.queue = queue;

        Properties props = new Properties();
        props.put("metadata.broker.list", "10.10.102.191:9092,10.10.102.192:9092,10.10.102.193:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // props.put("partitioner.class", "example.producer.SimplePartitioner");
        props.put("request.required.acks", "1");

        ProducerConfig config = new ProducerConfig(props);

        producer = new Producer<String, String>(config);
    }

    @Override
    public void run() {
        // Drain log lines from the shared queue and publish each non-empty line
        // to the "recsys" topic, keyed by the current timestamp.
        while (true) {
            try {
                String line = queue.take();
                if (line != null && !line.replace("\n", "").replace("\r", "").equals("")) {
                    String timestamp = sdf.format(new Date());
                    KeyedMessage<String, String> data = new KeyedMessage<String, String>("recsys", timestamp, line);
                    logger.debug("sending kv :( {}:{})", timestamp, line);
                    producer.send(data);
                }
            } catch (InterruptedException e) {
                logger.error("kafka producer failed to send message", e);
            }
        }
    }

}

src/main/java/com/wankun/logcount/kafka/TailLog.java

+132

package com.wankun.logcount.kafka;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.CharBuffer;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tails a log file and pushes complete lines into the shared queue.
 */
public class TailLog extends Thread {
    private final static Logger logger = LoggerFactory.getLogger(TailLog.class);

    private BlockingQueue<String> queue;
    private String logname;

    private CharBuffer buf = CharBuffer.allocate(4096);

    // private ByteBuffer buf = ByteBuffer.allocate(4096);

    public TailLog(BlockingQueue<String> queue, String logname) {
        this.queue = queue;
        this.logname = logname;
    }

    @Override
    public void run() {
        BufferedReader reader = null;
        try {
            // Path logpath = Paths.get(logname);
            // File posfile = logpath.getParent().resolve("." + logpath.getFileName() + ".pos").toFile();
            reader = new BufferedReader(new FileReader(new File(logname)));

            long filesize = 0;
            while (true) {
                // Check whether the log file has been rotated (the file on disk is now smaller than what we read).
                if (filesize > new File(logname).length()) {
                    logger.debug("filesize :{} current system file size :{} . Log file switchover!", filesize,
                            new File(logname).length());
                    try {
                        // Before switching to the new file, drain everything left in the old one.
                        StringBuilder line = new StringBuilder();
                        while (reader.read(buf) > 0) {
                            buf.flip();
                            synchronized (buf) {
                                // Read the buffer and split it into lines.
                                for (int i = 0; i < buf.limit(); i++) {
                                    char c = buf.get();
                                    line.append(c);
                                    if ((c == '\n') || (c == '\r'))
                                        if (line.length() > 0) {
                                            queue.put(line.toString());
                                            line = new StringBuilder();
                                        }
                                }
                            }
                        }
                        queue.put(line.toString());
                        buf.clear();

                        // Switch to reading the new file from the beginning.
                        if (reader != null)
                            reader.close();
                        reader = new BufferedReader(new FileReader(new File(logname)));
                    } catch (Exception e) {
                        logger.error("File {} does not exist", logname, e);
                        Thread.sleep(10000);
                        continue;
                    }
                }

                for (int retrys = 10; retrys > 0; retrys--) {
                    int bufread = reader.read(buf);
                    if (bufread < 0) {
                        if (retrys > 1)
                            Thread.sleep(1000);
                        else {
                            // No new data arrived after roughly 10 seconds of retries:
                            // flush whatever is still buffered so it is not held back indefinitely.
                            synchronized (buf) {
                                buf.flip();
                                char[] dst = new char[buf.length()];
                                buf.get(dst);
                                buf.clear();
                                queue.put(new String(dst));
                            }
                        }
                    } else {
                        filesize = new File(logname).length();
                        retrys = -1;

                        buf.flip();
                        synchronized (buf) {
                            // Read the buffer and split it into lines.
                            StringBuilder line = new StringBuilder();
                            for (int i = 0; i < buf.limit(); i++) {
                                char c = buf.get();
                                line.append(c);
                                if ((c == '\n') || (c == '\r'))
                                    if (line.length() > 0) {
                                        queue.put(line.toString());
                                        line = new StringBuilder();
                                    }
                            }
                            // Keep the incomplete trailing line in the buffer so the next read appends to it.
                            buf.compact();
                            if (line.length() > 0) {
                                buf.append(line);
                            }
                        }
                        break;
                    }
                }
            }
        } catch (Exception e) {
            logger.error("Failed to read file", e);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e) {
                    logger.error("Failed to close the file reader", e);
                }
            }
        }
    }

}

src/main/java/com/wankun/logcount/kafka/TailService.java

+28

package com.wankun.logcount.kafka;

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TailService {

    private final static Logger logger = LoggerFactory.getLogger(TailService.class);

    public static void main(String[] args) {
        if (args.length < 1) {
            logger.error("Usage : TailService [logfile]");
            System.exit(0);
        }

        BlockingQueue<String> queue = new ArrayBlockingQueue<String>(10000);

        // One TailLog thread per log file, all feeding a single MsgSender.
        for (String arg : args) {
            new TailLog(queue, arg).start();
        }

        new MsgSender(queue).start();
    }

}
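For reference, a hypothetical way to launch this client on the application server, assuming the conf/ and libs/ layout described in the README (the log path is a placeholder, not taken from this commit):

    java -cp conf:recsys-1.0.jar:libs/* com.wankun.logcount.kafka.TailService /path/to/app.log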

src/main/java/com/wankun/logcount/spark/LogStream.java

+141

package com.wankun.logcount.spark;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import scala.Tuple2;

import com.google.common.collect.Maps;

public class LogStream {
    private final static Logger logger = LoggerFactory.getLogger(LogStream.class);

    private static HConnection connection = null;
    private static HTableInterface table = null;
    private static HttpServer2 infoServer = null;

    public static void openHBase(String tablename) throws IOException {
        // Note: the tablename parameter is currently unused; the table name is hard-coded below.
        Configuration conf = HBaseConfiguration.create();
        synchronized (HConnection.class) {
            if (connection == null)
                connection = HConnectionManager.createConnection(conf);
        }

        synchronized (HTableInterface.class) {
            if (table == null) {
                table = connection.getTable("recsys_logs");
            }
        }

        /* start http info server */
        HttpServer2.Builder builder = new HttpServer2.Builder().setName("recsys").setConf(conf);
        InetSocketAddress addr = NetUtils.createSocketAddr("0.0.0.0", 8089);
        builder.addEndpoint(URI.create("http://" + NetUtils.getHostPortString(addr)));
        infoServer = builder.build();

        infoServer.addServlet("monitor", "/monitor", RecsysLogs.class);
        infoServer.setAttribute("htable", table);
        infoServer.setAttribute("conf", conf);
        infoServer.start();
    }

    public static void closeHBase() {
        if (table != null)
            try {
                table.close();
            } catch (IOException e) {
                logger.error("Failed to close table", e);
            }
        if (connection != null)
            try {
                connection.close();
            } catch (IOException e) {
                logger.error("Failed to close connection", e);
            }
        if (infoServer != null && infoServer.isAlive())
            try {
                infoServer.stop();
            } catch (Exception e) {
                logger.error("Failed to stop infoServer", e);
            }
    }

    public static void main(String[] args) {
        // open hbase
        try {
            openHBase("logcount");
        } catch (IOException e) {
            logger.error("Failed to open the HBase connection", e);
            System.exit(-1);
        }

        SparkConf conf = new SparkConf().setAppName("recsys log stream");
        JavaStreamingContext ssc = new JavaStreamingContext(conf, new Duration(1000));

        Map<String, Integer> topicMap = Maps.newHashMap();
        topicMap.put("recsys", 4);
        JavaPairReceiverInputDStream<String, String> logstream = KafkaUtils.createStream(ssc,
                "10.10.102.191:2181,10.10.102.192:2181,10.10.102.193:2181", "recsys_group1", topicMap);

        JavaDStream<String> lines = logstream.map(new Function<Tuple2<String, String>, String>() {
            private static final long serialVersionUID = -1801798365843350169L;

            @Override
            public String call(Tuple2<String, String> tuple2) {
                return tuple2._2();
            }
        }).filter(new Function<String, Boolean>() {
            private static final long serialVersionUID = 7786877762996470593L;

            @Override
            public Boolean call(String msg) throws Exception {
                return msg.indexOf("character service received paramters:") > 0;
            }
        });

        // Count the matching log lines in each batch and save the result to HBase.
        JavaDStream<Long> nums = lines.count();
        nums.foreachRDD(new Function<JavaRDD<Long>, Void>() {

            private SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd HH:mm:ss");

            @Override
            public Void call(JavaRDD<Long> rdd) throws Exception {
                Long num = rdd.take(1).get(0);
                String ts = sdf.format(new Date());
                Put put = new Put(Bytes.toBytes(ts));
                put.add(Bytes.toBytes("f"), Bytes.toBytes("nums"), Bytes.toBytes(num));
                table.put(put);
                return null;
            }
        });

        ssc.start();
        ssc.awaitTermination();
    }

}

src/main/java/com/wankun/logcount/spark/RecsysLogs.java

+49

package com.wankun.logcount.spark;

import java.io.IOException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

public class RecsysLogs extends HttpServlet {

    private static final long serialVersionUID = 4289573629015709424L;

    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
        // Reverse-scan the latest 20 counts from the recsys_logs table and return them as plain text.
        HTableInterface htable = (HTableInterface) getServletContext().getAttribute("htable");
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("nums"));
        scan.setReversed(true);
        // scan.setMaxResultSize(20);
        scan.setFilter(new PageFilter(20));
        ResultScanner scanner = htable.getScanner(scan);
        StringBuilder sb = new StringBuilder();
        for (Result res : scanner) {
            Cell cell = res.getColumnLatestCell(Bytes.toBytes("f"), Bytes.toBytes("nums"));
            Long nums = Bytes.toLong(CellUtil.cloneValue(cell));
            String key = Bytes.toString(CellUtil.cloneRow(cell));
            sb.append(key + " : " + nums + "\n");
        }
        scanner.close();

        resp.getWriter().write(sb.toString());
    }

    @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
        this.doGet(req, resp);
    }

}
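LogStream registers this servlet at /monitor on the embedded HTTP server it starts on port 8089. As a minimal, hypothetical client for that endpoint (the host name is a placeholder for the machine running LogStream, and the package/class name are not part of this commit):

package com.wankun.logcount.examples;  // hypothetical package, for illustration only

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class MonitorClient {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://spark-driver-host:8089/monitor");
        BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream(), "UTF-8"));
        try {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);  // one "yyyyMMdd HH:mm:ss : count" line per HBase row
            }
        } finally {
            in.close();
        }
    }
}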

‎src/main/resources/hbase-site.xml

+214
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>

  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://dwztgame/hbase</value>
  </property>

  <property>
    <name>dfs.datanode.max.xcievers</name>
    <value>40960</value>
  </property>

  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>node01.dw.ztgame.com:2181,node02.dw.ztgame.com:2181,node03.dw.ztgame.com:2181</value>
  </property>

  <property>
    <name>hbase.regionserver.handler.count</name>
    <value>200</value>
    <description>Count of RPC Server instances spun up on RegionServers.
    The same property is used by the Master for the count of master handlers.
    Default is 10.</description>
  </property>

  <property>
    <name>hbase.regionserver.flushlogentries</name>
    <value>500</value>
    <description>Sync the HLog to HDFS when it has accumulated this many
    entries. Default 1. Value is checked on every HLog.hflush.</description>
  </property>

  <property>
    <name>hbase.regionserver.optionallogflushinterval</name>
    <value>2000</value>
    <description>Sync the HLog to HDFS after this interval if it has not
    accumulated enough entries to trigger a sync. Default 1 second. Units:
    milliseconds.</description>
  </property>

  <property>
    <name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
    <value>600000</value>
    <description>How often a region server runs the split/compaction check.</description>
  </property>

  <property>
    <name>hbase.regions.slop</name>
    <value>0</value>
    <description>Rebalance if any regionserver has average + (average * slop) regions.
    Default is 0% slop.</description>
  </property>

  <property>
    <name>hbase.server.thread.wakefrequency</name>
    <value>5000</value>
    <description>Time to sleep in between searches for work (in milliseconds).
    Used as sleep interval by service threads such as the log roller.</description>
  </property>

  <property>
    <name>hbase.hregion.memstore.flush.size</name>
    <value>134217728</value>
    <description>Memstore will be flushed to disk if the size of the memstore
    exceeds this number of bytes. Value is checked by a thread that runs
    every hbase.server.thread.wakefrequency.</description>
  </property>

  <property>
    <name>hbase.hregion.memstore.block.multiplier</name>
    <value>6</value>
    <description>Block updates if the memstore has hbase.hregion.block.memstore
    times hbase.hregion.flush.size bytes. Useful for preventing a
    runaway memstore during spikes in update traffic. Without an
    upper bound, the memstore fills such that when it flushes, the
    resultant flush files take a long time to compact or split, or,
    worse, we OOME.</description>
  </property>

  <property>
    <name>hbase.hregion.memstore.mslab.enabled</name>
    <value>true</value>
    <description>Experimental: Enables the MemStore-Local Allocation Buffer,
    a feature which works to prevent heap fragmentation under
    heavy write loads. This can reduce the frequency of stop-the-world
    GC pauses on large heaps.</description>
  </property>

  <property>
    <name>hfile.block.cache.size</name>
    <value>0.2</value>
    <description>Percentage of maximum heap (-Xmx setting) to allocate to the block cache
    used by HFile/StoreFile. Default of 0.2 means allocate 20%.
    Set to 0 to disable.</description>
  </property>

  <property>
    <name>hbase.regionserver.nbreservationblocks</name>
    <value>10</value>
    <description>The number of reservoir blocks of memory released on
    OOME so we can clean up properly before server shutdown.</description>
  </property>

  <property>
    <name>hbase.regionserver.global.memstore.upperLimit</name>
    <value>0.5</value>
    <description>Maximum size of all memstores in a region server before new
    updates are blocked and flushes are forced. Defaults to 40% of heap.</description>
  </property>

  <property>
    <name>hbase.regionserver.global.memstore.lowerLimit</name>
    <value>0.4</value>
    <description>When memstores are being forced to flush to make room in
    memory, keep flushing until we hit this mark. Defaults to 35% of heap.
    Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
    the minimum possible flushing to occur when updates are blocked due to
    memstore limiting.</description>
  </property>

  <property>
    <name>hbase.hregion.max.filesize</name>
    <value>2684354560</value>
    <description>
    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
    grown to exceed this value, the hosting HRegion is split in two.
    Default: 256M.</description>
  </property>

  <property>
    <name>hbase.snapshot.enabled</name>
    <value>true</value>
  </property>

  <property>
    <name>hbase.regionserver.regionSplitLimit</name>
    <value>200</value>
    <description>Limit for the number of regions after which no more region
    splitting should take place. This is not a hard limit for the number of
    regions but acts as a guideline for the regionserver to stop splitting after
    a certain limit. Default is set to MAX_INT; i.e. do not block splitting.</description>
  </property>

  <property>
    <name>hbase.hstore.compactionThreshold</name>
    <value>4</value>
    <description>If more than this number of HStoreFiles exist in any one HStore
    (one HStoreFile is written per flush of the memstore), then a compaction
    is run to rewrite all HStoreFiles as one. Larger numbers
    put off compaction, but when it runs it takes longer to complete.</description>
  </property>

  <property>
    <name>hbase.hstore.blockingStoreFiles</name>
    <value>12</value>
    <description>If more than this number of StoreFiles exist in any one Store
    (one StoreFile is written per flush of the MemStore), then updates are
    blocked for this HRegion until a compaction is completed, or
    until hbase.hstore.blockingWaitTime has been exceeded.</description>
  </property>

  <property>
    <name>hbase.hstore.compaction.max</name>
    <value>6</value>
    <description>Max number of HStoreFiles to compact per 'minor' compaction.</description>
  </property>

  <property>
    <name>hbase.hregion.majorcompaction</name>
    <value>172800000</value>
    <description>The time (in milliseconds) between 'major' compactions of all
    HStoreFiles in a region. Default: 1 day.
    Set to 0 to disable automated major compactions.</description>
  </property>

  <property>
    <name>io.storefile.bloom.enabled</name>
    <value>true</value>
  </property>

  <property>
    <name>hbase.replication</name>
    <value>true</value>
  </property>

</configuration>

‎src/main/resources/logback.xml

+53
<?xml version="1.0" encoding="UTF-8" ?>
<configuration>
  <jmxConfigurator />
  <!-- The file starts with <configuration>, followed by zero or more <appender> elements -->
  <!-- Console appender -->
  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <layout class="ch.qos.logback.classic.PatternLayout">
      <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
    </layout>
  </appender>
  <!-- File appender (size-based rolling: when the file exceeds the configured size it is rolled over to a backup) -->
  <appender name="FILE"
    class="ch.qos.logback.core.rolling.RollingFileAppender">
    <File>logs/recsys.log</File>
    <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
      <FileNamePattern>logs/recsys.log.%i.bak</FileNamePattern>
      <MinIndex>1</MinIndex>
      <MaxIndex>12</MaxIndex>
    </rollingPolicy>
    <triggeringPolicy
      class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
      <MaxFileSize>100MB</MaxFileSize>
    </triggeringPolicy>
    <layout class="ch.qos.logback.classic.PatternLayout">
      <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</Pattern>
    </layout>
  </appender>
  <!-- Naming loggers here lays the groundwork for adjusting log levels per logger via JMX -->
  <!-- There may be zero or more <logger> elements and at most one <root> element.
    The root element defines the default log level and output appenders;
    a logger can target a single class or a whole package, and may override the log level and add
    appenders (added appenders are in addition to those of root, which still apply).
  -->
  <!--
  <root level="INFO">
    <appender-ref ref="STDOUT" />
  </root>

  <logger name="com.wankun.textsplit.TestLogback" level="DEBUG">
    <appender-ref ref="STDOUT" />
    <appender-ref ref="FILE" />
  </logger>

  <logger name="com.wankun.textsplit">
    <level value="DEBUG" />
    <appender-ref ref="STDOUT" />
    <appender-ref ref="FILE" />
  </logger>
  -->
  <root level="DEBUG">
    <appender-ref ref="STDOUT" />
    <appender-ref ref="FILE" />
  </root>
</configuration>

<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>recsys logs monitor</title>
</head>

<body>
<h2>Welcome to the recsys logs monitor!</h2> <br>
<a href="/monitor">View the latest log statistics</a>
</body>
</html>
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

‎target/classes/hbase-site.xml

+214

(identical to src/main/resources/hbase-site.xml above)

‎target/classes/logback.xml

+53

(identical to src/main/resources/logback.xml above)
+11

(a second, identical copy of the index.html shown above, under target/classes)

‎target/maven-archiver/pom.properties

+5
#Generated by Maven
#Thu Dec 18 17:52:06 CST 2014
version=1.0
groupId=com.wankun
artifactId=recsys

target/maven-status/maven-compiler-plugin/compile/default-compile/createdFiles.lst

+9

com\wankun\recsys\spark\LogStream.class
com\wankun\recsys\spark\LogStream$2.class
com\wankun\recsys\kafka\TailLog2.class
com\wankun\recsys\spark\RecsysLogs.class
com\wankun\recsys\spark\LogStream$1.class
com\wankun\recsys\kafka\TailLog.class
com\wankun\recsys\spark\LogStream$3.class
com\wankun\recsys\kafka\TailService.class
com\wankun\recsys\kafka\MsgSender.class

target/maven-status/maven-compiler-plugin/compile/default-compile/inputFiles.lst

+6

d:\workspace\recsys\src\main\java\com\wankun\recsys\spark\RecsysLogs.java
d:\workspace\recsys\src\main\java\com\wankun\recsys\kafka\MsgSender.java
d:\workspace\recsys\src\main\java\com\wankun\recsys\kafka\TailLog2.java
d:\workspace\recsys\src\main\java\com\wankun\recsys\kafka\TailLog.java
d:\workspace\recsys\src\main\java\com\wankun\recsys\spark\LogStream.java
d:\workspace\recsys\src\main\java\com\wankun\recsys\kafka\TailService.java

‎target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/inputFiles.lst

Whitespace-only changes.

‎target/recsys-1.0.jar

20 KB
Binary file not shown.

0 commit comments
