Introduction
Logs are an essential tool for troubleshooting and monitoring. Spring Boot uses Logback by default and supports structured log output. This article covers Spring Boot logging configuration and an ELK log aggregation setup.
Logback Configuration
1. Basic Configuration
<!-- logback-spring.xml -->
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<!-- Include Spring Boot's default Logback configuration -->
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<!-- Console output -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!-- File output -->
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>logs/app.log</file>
<encoder>
<pattern>${FILE_LOG_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>logs/app.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<maxFileSize>100MB</maxFileSize>
<maxHistory>30</maxHistory>
<totalSizeCap>10GB</totalSizeCap>
</rollingPolicy>
</appender>
<!-- Asynchronous output -->
<appender name="ASYNC_FILE" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="FILE"/>
<queueSize>512</queueSize>
<discardingThreshold>0</discardingThreshold>
</appender>
<!-- Application logging -->
<logger name="com.example.demo" level="INFO"/>
<!-- Third-party library logging -->
<logger name="org.springframework" level="WARN"/>
<logger name="org.hibernate" level="WARN"/>
<logger name="com.zaxxer" level="WARN"/>
<!-- Root logger -->
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="ASYNC_FILE"/>
</root>
</configuration>
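A quick usage sketch (the DemoService class below is hypothetical): logger names follow the class's package, so a class under com.example.demo is governed by the <logger name="com.example.demo" level="INFO"/> entry above, meaning its DEBUG statements are suppressed while INFO and above go to both CONSOLE and ASYNC_FILE.
package com.example.demo;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DemoService {
    private static final Logger log = LoggerFactory.getLogger(DemoService.class);

    public void doWork() {
        log.debug("Not written: DEBUG is below the configured INFO level");
        log.info("Written to the console and the async file appender");
    }
}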
2. Multi-Environment Configuration
<!-- logback-spring.xml -->
<configuration>
<!-- Development -->
<springProfile name="dev">
<logger name="com.example.demo" level="DEBUG"/>
<root level="DEBUG">
<appender-ref ref="CONSOLE"/>
</root>
</springProfile>
<!-- Test -->
<springProfile name="test">
<logger name="com.example.demo" level="INFO"/>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
</root>
</springProfile>
<!-- Production -->
<springProfile name="prod">
<logger name="com.example.demo" level="INFO"/>
<root level="WARN">
<appender-ref ref="ASYNC_FILE"/>
</root>
</springProfile>
</configuration>
3. YAML Configuration
Most of this can also be controlled from application.yml. Note that in newer Spring Boot versions the rotation settings live under logging.logback.rollingpolicy.* rather than logging.file.*:
logging:
  level:
    root: INFO
    com.example.demo: DEBUG
    org.springframework: WARN
    org.hibernate: WARN
  pattern:
    console: "%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n"
    file: "%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n"
  file:
    name: logs/app.log
  logback:
    rollingpolicy:
      max-file-size: 100MB
      max-history: 30
      total-size-cap: 10GB
Structured Logging
1. JSON Output
<!-- Add the dependency -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>7.4</version>
</dependency>
<!-- logback-spring.xml -->
<configuration>
<!-- Expose the active profile to Logback; a plain ${spring.profiles.active} placeholder is not resolved in logback-spring.xml -->
<springProperty scope="context" name="activeProfile" source="spring.profiles.active" defaultValue="dev"/>
<appender name="JSON_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>logs/app.json</file>
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<includeCallerData>true</includeCallerData>
<customFields>{"application":"demo","environment":"${activeProfile}"}</customFields>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>logs/app.%d{yyyy-MM-dd}.%i.json</fileNamePattern>
<maxFileSize>100MB</maxFileSize>
<maxHistory>30</maxHistory>
</rollingPolicy>
</appender>
<root level="INFO">
<appender-ref ref="JSON_FILE"/>
</root>
</configuration>
2. Custom Fields
// Not a Spring bean: register it in logback-spring.xml as a <provider> inside a
// net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder.
public class CustomLogConverter extends AbstractJsonProvider<ILoggingEvent> {
    @Override
    public void writeTo(JsonGenerator jsonGenerator, ILoggingEvent event) throws IOException {
        // Request ID from the MDC
        String requestId = MDC.get("requestId");
        if (requestId != null) {
            jsonGenerator.writeStringField("request_id", requestId);
        }
        // Current user, if authenticated
        Authentication auth = SecurityContextHolder.getContext().getAuthentication();
        if (auth != null && auth.isAuthenticated()) {
            jsonGenerator.writeStringField("user", auth.getName());
        }
        // Response time, only when logging happens on a request thread
        // (assumes a filter stored a "startTime" request attribute)
        ServletRequestAttributes attrs = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        if (attrs != null) {
            Long startTime = (Long) attrs.getRequest().getAttribute("startTime");
            if (startTime != null) {
                jsonGenerator.writeNumberField("response_time", System.currentTimeMillis() - startTime);
            }
        }
    }
}
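When an extra field only matters for a single log statement, a custom provider may be more than you need: logstash-logback-encoder also offers StructuredArguments, which attaches ad-hoc JSON fields at the call site. A short sketch (orderId and userId are assumed example variables):
import static net.logstash.logback.argument.StructuredArguments.keyValue;
import static net.logstash.logback.argument.StructuredArguments.value;

// With LogstashEncoder, keyValue() renders "orderId=123" in the message text and also
// adds an "orderId" JSON field; value() adds the JSON field without the "key=" prefix.
log.info("Order created {}", keyValue("orderId", orderId));
log.info("Order created for user {}", value("userId", userId));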
3. MDC Context
@Component
public class LoggingFilter implements Filter {
@Override
public void doFilter(
ServletRequest request,
ServletResponse response,
FilterChain chain
) throws IOException, ServletException {
HttpServletRequest httpRequest = (HttpServletRequest) request;
// Populate the MDC
MDC.put("requestId", UUID.randomUUID().toString());
MDC.put("method", httpRequest.getMethod());
MDC.put("uri", httpRequest.getRequestURI());
MDC.put("ip", httpRequest.getRemoteAddr());
try {
chain.doFilter(request, response);
} finally {
// Clean up the MDC
MDC.clear();
}
}
}
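The @Component annotation is enough for Spring Boot to register the filter, but if you want to be sure the MDC is populated before anything else runs, you can control the order explicitly. A sketch using FilterRegistrationBean (the configuration class name is made up):
@Configuration
public class LoggingFilterConfig {

    @Bean
    public FilterRegistrationBean<LoggingFilter> loggingFilterRegistration(LoggingFilter loggingFilter) {
        FilterRegistrationBean<LoggingFilter> registration = new FilterRegistrationBean<>(loggingFilter);
        // Apply to every request and run before other filters
        registration.addUrlPatterns("/*");
        registration.setOrder(Ordered.HIGHEST_PRECEDENCE);
        return registration;
    }
}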
@Service
public class OrderService {
    private static final Logger log = LoggerFactory.getLogger(OrderService.class);

    public Order createOrder(OrderCreateDTO dto) {
        // MDC values are included in every log statement on this thread automatically
        log.info("Creating order: userId={}", dto.getUserId());
        Order order = orderRepository.save(convert(dto));
        log.info("Order created: orderId={}", order.getId());
        return order;
    }
}
ELK Log Aggregation
1. Architecture
┌─────────────┐     ┌─────────────┐     ┌─────────────┐     ┌─────────────┐
│     App     │────▶│  Logstash   │────▶│Elasticsearch│────▶│   Kibana    │
│  (Logback)  │     │  (collect)  │     │   (store)   │     │ (visualize) │
└─────────────┘     └─────────────┘     └─────────────┘     └─────────────┘
2. Logstash Configuration
# logstash.conf
input {
tcp {
port => 5000
codec => json_lines
}
}
filter {
# Parse the timestamp
date {
match => ["timestamp", "ISO8601"]
target => "@timestamp"
}
# Add fields
mutate {
add_field => {
"environment" => "prod"
"application" => "demo"
}
}
# Drop fields we don't need
mutate {
remove_field => ["thread", "logger_name"]
}
}
output {
elasticsearch {
hosts => ["elasticsearch:9200"]
index => "logs-%{+YYYY.MM.dd}"
}
# Debug output
stdout {
codec => rubydebug
}
}
3. Docker Compose
# docker-compose.yml
version: '3.8'
services:
  elasticsearch:
    image: elasticsearch:8.8.0
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ports:
      - "9200:9200"
    volumes:
      - es-data:/usr/share/elasticsearch/data
  logstash:
    image: logstash:8.8.0
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "5000:5000"
    depends_on:
      - elasticsearch
  kibana:
    image: kibana:8.8.0
    ports:
      - "5601:5601"
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    depends_on:
      - elasticsearch
volumes:
  es-data:
4. Shipping Logs from Logback to Logstash
<!-- logback-spring.xml -->
<configuration>
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>localhost:5000</destination>
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<customFields>{"application":"demo"}</customFields>
</encoder>
<reconnectionDelay>5 seconds</reconnectionDelay>
</appender>
<root level="INFO">
<appender-ref ref="LOGSTASH"/>
</root>
</configuration>
Kibana Configuration
1. Create an Index Pattern
- Open Kibana: http://localhost:5601
- Stack Management → Index Patterns (called Data Views in Kibana 8.x)
- Create index pattern: logs-*
- Select the time field: @timestamp
2. Create a Dashboard
- Discover → select the index pattern
- Add a filter: application: demo
- Save the search
- Dashboard → Create dashboard
- Add visualizations
3. Common Queries
# Error logs
level: ERROR
# A specific request
requestId: "abc-123"
# A specific user
user: "admin"
# Slow requests
response_time: >1000
# Time range
@timestamp: [now-1h TO now]
# Combined query
level: ERROR AND application: demo AND @timestamp: [now-24h TO now]
Log Tracing
1. Trace ID
@Component
public class TraceIdFilter implements Filter {
@Override
public void doFilter(
ServletRequest request,
ServletResponse response,
FilterChain chain
) throws IOException, ServletException {
// Read the trace ID from the request header, or generate a new one
HttpServletRequest httpRequest = (HttpServletRequest) request;
String traceId = httpRequest.getHeader("X-Trace-Id");
if (traceId == null || traceId.isEmpty()) {
traceId = UUID.randomUUID().toString();
}
MDC.put("traceId", traceId);
((HttpServletResponse) response).setHeader("X-Trace-Id", traceId);
try {
chain.doFilter(request, response);
} finally {
MDC.clear();
}
}
}
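A trace ID is only useful across services if outgoing calls carry it along. A sketch of propagating it with a RestTemplate interceptor (assumes the downstream service reads the same X-Trace-Id header):
public class TraceIdClientInterceptor implements ClientHttpRequestInterceptor {
    @Override
    public ClientHttpResponse intercept(HttpRequest request, byte[] body,
                                        ClientHttpRequestExecution execution) throws IOException {
        // Copy the trace ID from the calling thread's MDC into the outgoing header
        String traceId = MDC.get("traceId");
        if (traceId != null) {
            request.getHeaders().add("X-Trace-Id", traceId);
        }
        return execution.execute(request, body);
    }
}
Register it with RestTemplateBuilder.additionalInterceptors(...) so every outgoing request is tagged.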
2. Async Logging
@Configuration
public class AsyncLogConfig {
@Bean
public Executor logExecutor() {
ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
executor.setCorePoolSize(4);
executor.setMaxPoolSize(8);
executor.setQueueCapacity(100);
executor.setThreadNamePrefix("async-log-");
return executor;
}
}
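One caveat: MDC is backed by a ThreadLocal, so values set on the request thread are not visible inside tasks submitted to this executor. A sketch of copying the context over with Spring's TaskDecorator (attach it via executor.setTaskDecorator(new MdcTaskDecorator())):
public class MdcTaskDecorator implements TaskDecorator {
    @Override
    public Runnable decorate(Runnable runnable) {
        // Capture the submitting thread's MDC and restore it in the worker thread
        Map<String, String> context = MDC.getCopyOfContextMap();
        return () -> {
            if (context != null) {
                MDC.setContextMap(context);
            }
            try {
                runnable.run();
            } finally {
                MDC.clear();
            }
        };
    }
}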
Best Practices
1. Log Levels
// ✅ Recommended
log.debug("Debug details: {}", data); // development-time debugging
log.info("Business event: {}", result); // business records
log.warn("Warning: {}", issue); // potential problems
log.error("Error: {}", error, e); // errors, with the exception attached
// ❌ Not recommended
log.debug("User ID: " + userId); // use placeholders instead of string concatenation
System.out.println("debug"); // use the logging framework instead of System.out
2. Sensitive Data
// ✅ Recommended
log.info("User login: username={}", mask(username));
// ❌ Not recommended
log.info("User login: {}", user); // may expose sensitive fields
log.info("Password: {}", password); // never log passwords
3. Log Rotation
logging:
  logback:
    rollingpolicy:
      max-file-size: 100MB
      max-history: 30
      total-size-cap: 10GB
4. Performance Considerations
// ✅ Recommended
if (log.isDebugEnabled()) {
    log.debug("Debug: {}", expensiveOperation());
}
// ❌ Not recommended
log.debug("Debug: {}", expensiveOperation()); // expensiveOperation() is evaluated even when DEBUG is disabled
Summary
Key points for the logging system:
- ✅ Logback configuration - multi-environment profiles, async output
- ✅ Structured logging - JSON format, custom fields
- ✅ MDC context - request tracing, user information
- ✅ ELK aggregation - Logstash, Elasticsearch, Kibana
- ✅ Log tracing - trace IDs, cross-service correlation
- ✅ Best practices - log levels, sensitive data, performance
A solid logging system is the foundation of production operations.