Go 微服务架构实战
Go 是微服务架构的理想语言,理解微服务核心组件有助于构建可扩展的分布式系统。
一、服务通信
1.1 gRPC 基础
// Proto definition (proto3) for the user service.
syntax = "proto3";
// UserService exposes user lookup and creation as unary RPCs.
service UserService {
rpc GetUser(GetUserRequest) returns (GetUserResponse);
rpc CreateUser(CreateUserRequest) returns (CreateUserResponse);
}
// Request carries only the user ID to look up.
message GetUserRequest {
string id = 1;
}
// Response mirrors the stored user record.
message GetUserResponse {
string id = 1;
string name = 2;
string email = 3;
}
// 服务端
// UserServiceImpl implements the UserService gRPC server.
// Embedding pb.UnimplementedUserServiceServer keeps the implementation
// forward-compatible: RPCs added to the proto later get a default
// "unimplemented" handler instead of breaking the build.
type UserServiceImpl struct {
pb.UnimplementedUserServiceServer
}
// GetUser looks up a user by ID and maps the result onto the gRPC
// response type.
func (s *UserServiceImpl) GetUser(
	ctx context.Context,
	req *pb.GetUserRequest,
) (*pb.GetUserResponse, error) {
	// NOTE(review): getUserByID's behavior for a missing ID is not
	// visible here — presumably it never fails; confirm, and return a
	// NotFound error if it can.
	u := getUserByID(req.Id)
	resp := &pb.GetUserResponse{
		Id:    u.ID,
		Name:  u.Name,
		Email: u.Email,
	}
	return resp, nil
}
// Client side: dial the server with plaintext credentials and issue a
// unary RPC through the generated stub.
// NOTE(review): both the Dial error and the GetUser error are discarded
// here for brevity — production code must check them, and the conn
// should be closed when no longer needed.
conn, _ := grpc.Dial("localhost:50051",
grpc.WithTransportCredentials(insecure.NewCredentials()))
client := pb.NewUserServiceClient(conn)
resp, _ := client.GetUser(ctx, &pb.GetUserRequest{Id: "123"})
1.2 HTTP REST
// Gin + REST: a JSON API over HTTP.
r := gin.Default()
// GET /users/:id — fetch a single user by path parameter.
// NOTE(review): a missing user is not handled — presumably getUserByID
// returns a zero value; confirm and return 404 if appropriate.
r.GET("/users/:id", func(c *gin.Context) {
id := c.Param("id")
user := getUserByID(id)
c.JSON(200, user)
})
// POST /users — create a user from a JSON request body.
// Malformed bodies are rejected with 400 instead of being silently
// bound to a zero-valued struct (the original dropped the
// ShouldBindJSON error).
r.POST("/users", func(c *gin.Context) {
	var req CreateUserRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(400, gin.H{"error": err.Error()})
		return
	}
	user := createUser(req)
	c.JSON(201, user)
})
1.3 服务选择
gRPC 适用场景:
- 内部服务通信
- 高性能要求
- 强类型接口
REST 适用场景:
- 对外 API
- 需要人类可读
- 简单集成
二、服务发现
2.1 Consul 集成
import (
consul "github.com/hashicorp/consul/api"
)
// 服务注册
// registerService registers this instance with the local Consul agent,
// attaching an HTTP health check polled every 10s with a 5s timeout.
// The service ID combines name and port so multiple instances of the
// same service on one host do not collide.
func registerService(name, address string, port int) error {
	// The original discarded this error and then dereferenced a
	// possibly-nil client.
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		return fmt.Errorf("creating consul client: %w", err)
	}
	registration := &consul.AgentServiceRegistration{
		ID:      fmt.Sprintf("%s-%d", name, port),
		Name:    name,
		Address: address,
		Port:    port,
		Check: &consul.AgentServiceCheck{
			HTTP:     fmt.Sprintf("http://%s:%d/health", address, port),
			Interval: "10s",
			Timeout:  "5s",
		},
	}
	return client.Agent().ServiceRegister(registration)
}
// 服务发现
// discoverService returns "host:port" addresses for all healthy
// instances of the named service, as reported by Consul's health API.
// The original discarded both the client-construction and query errors
// and always returned a nil error despite its signature.
func discoverService(name string) ([]string, error) {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		return nil, fmt.Errorf("creating consul client: %w", err)
	}
	// passingOnly=true filters out instances with failing health checks.
	services, _, err := client.Health().Service(name, "", true, nil)
	if err != nil {
		return nil, fmt.Errorf("querying service %q: %w", name, err)
	}
	addresses := make([]string, 0, len(services))
	for _, s := range services {
		addresses = append(addresses,
			fmt.Sprintf("%s:%d", s.Service.Address, s.Service.Port))
	}
	return addresses, nil
}
2.2 负载均衡
// LoadBalancer hands out targets in round-robin order. It is safe for
// concurrent use; a zero value with a non-empty targets slice is ready
// to use.
type LoadBalancer struct {
	targets []string
	current int
	mu      sync.RWMutex
}

// Next returns the next target in rotation. An empty target list yields
// "" — the original panicked (index out of range / integer divide by
// zero) when targets was empty.
func (lb *LoadBalancer) Next() string {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	if len(lb.targets) == 0 {
		return ""
	}
	target := lb.targets[lb.current]
	lb.current = (lb.current + 1) % len(lb.targets)
	return target
}
// Usage: pick the next target and dial it. grpc.WithInsecure is
// deprecated; use the credentials option, consistent with the gRPC
// client example earlier in this article.
lb := &LoadBalancer{
	targets: []string{"server1:8080", "server2:8080"},
}
target := lb.Next()
conn, _ := grpc.Dial(target,
	grpc.WithTransportCredentials(insecure.NewCredentials()))
三、链路追踪
3.1 OpenTelemetry
import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/jaeger"
"go.opentelemetry.io/otel/sdk/trace"
)
// Initialization: build a Jaeger exporter, install a batching
// TracerProvider as the global OpenTelemetry provider, and return a
// shutdown func the caller should invoke on exit to flush pending spans.
func initTracer() func() {
// NOTE(review): the jaeger.New error is discarded — on failure exporter
// is nil and the provider will misbehave; check and propagate it.
exporter, _ := jaeger.New(
jaeger.WithCollectorEndpoint(
jaeger.WithEndpoint("http://localhost:14268/api/traces")))
tp := trace.NewTracerProvider(
trace.WithBatcher(exporter),
// AlwaysSample traces every request — fine for a demo, usually too
// expensive in production (prefer a ratio-based sampler).
trace.WithSampler(trace.AlwaysSample()),
)
otel.SetTracerProvider(tp)
// NOTE(review): tp.Shutdown's error is ignored here — consider logging it.
return func() { tp.Shutdown(context.Background()) }
}
// handler starts a span for this request and runs the business logic
// under the span's context so downstream calls are traced.
func handler(w http.ResponseWriter, r *http.Request) {
	tracer := otel.Tracer("myservice")
	spanCtx, span := tracer.Start(r.Context(), "handler")
	defer span.End()
	// Business logic.
	process(spanCtx)
}
3.2 日志关联
// tracingLogger decorates a zap.Logger so every entry carries the
// current trace ID, letting log lines be correlated with traces.
type tracingLogger struct {
logger *zap.Logger
}
// Info logs at INFO level with a "trace_id" field appended to the
// caller-supplied fields.
// NOTE(review): getTraceID's definition isn't visible here — presumably
// it reads the active span; confirm. Also note append may write into the
// caller's backing array if it has spare capacity — harmless here, but
// worth knowing.
func (l *tracingLogger) Info(msg string, fields ...zap.Field) {
traceID := getTraceID()
fields = append(fields, zap.String("trace_id", traceID))
l.logger.Info(msg, fields...)
}
// 使用
logger := &tracingLogger{logger: zap.NewDefault()}
logger.Info("Processing request")
四、容错机制
4.1 重试机制
func callWithRetry(fn func() error, maxRetries int) error {
var lastErr error
for i := 0; i < maxRetries; i++ {
if err := fn(); err == nil {
return nil
} else {
lastErr = err
time.Sleep(time.Duration(i+1) * time.Second)
}
}
return lastErr
}
// Usage: retry the external call up to 3 times.
// NOTE(review): the returned error is discarded here for brevity —
// callers should normally check it.
callWithRetry(func() error {
return callExternalService()
}, 3)
4.2 熔断器
import "github.com/afex/hystrix-go/hystrix"
// Circuit-breaker configuration for the "external_service" command.
// Per hystrix-go docs: Timeout is in milliseconds; SleepWindow is how
// long (ms) to wait after the circuit opens before allowing a probe;
// ErrorPercentThreshold is the error rate (%) that trips the circuit.
hystrix.ConfigureCommand("external_service", hystrix.CommandConfig{
Timeout: 1000,
MaxConcurrentRequests: 100,
ErrorPercentThreshold: 25,
SleepWindow: 5000,
})
// Run the call through the breaker; the second func is the fallback
// executed on error, timeout, or open circuit.
err := hystrix.Do("external_service", func() error {
return callExternalService()
}, func(err error) error {
// Fallback. NOTE(review): returning nil masks the failure from the
// caller — confirm that graceful degradation is intended here.
return nil
})
4.3 限流
// 令牌桶限流
type RateLimiter struct {
tokens int
maxTokens int
interval time.Duration
}
func (rl *RateLimiter) Allow() bool {
if rl.tokens > 0 {
rl.tokens--
return true
}
return false
}
// Usage: a 100-token bucket refilling one token per second.
limiter := &RateLimiter{
tokens: 100,
maxTokens: 100,
interval: time.Second,
}
// handler admits the request only when the limiter grants a token;
// otherwise it responds 429 Too Many Requests.
func handler(w http.ResponseWriter, r *http.Request) {
	if limiter.Allow() {
		// Handle the request.
		return
	}
	http.Error(w, "Rate limited", 429)
}
五、配置管理
5.1 Viper 配置
import "github.com/spf13/viper"
// Initialization: look for configs/config.yaml.
viper.SetConfigName("config")
viper.SetConfigType("yaml")
viper.AddConfigPath("./configs")
// NOTE(review): ReadInConfig returns an error (e.g. missing or invalid
// file) that is discarded here — check it in real code.
viper.ReadInConfig()
// Read typed values by dotted key path.
port := viper.GetInt("server.port")
dbName := viper.GetString("database.name")
// Watch the config file and react to changes at runtime.
viper.WatchConfig()
viper.OnConfigChange(func(e fsnotify.Event) {
// Reload configuration here.
})
5.2 环境变量
// Read from environment variables automatically.
viper.AutomaticEnv()
// Explicitly bind a config key to a specific environment variable.
viper.BindEnv("server.port", "PORT")
// Lookup now falls through to the bound env var.
port := viper.GetInt("server.port") // reads the PORT environment variable
六、最佳实践
6.1 项目结构
project/
├── cmd/
│ └── server/
│ └── main.go
├── internal/
│ ├── handler/
│ ├── service/
│ ├── repository/
│ └── model/
├── pkg/
│ └── grpc/
├── api/
│ └── proto/
└── configs/
6.2 健康检查
// Liveness: always 200 while the process is up.
r.GET("/health", func(c *gin.Context) {
c.JSON(200, gin.H{
"status": "healthy",
"timestamp": time.Now().Format(time.RFC3339),
})
})
// Readiness: 503 until dependencies (here the database) are reachable,
// so load balancers stop routing traffic during outages.
r.GET("/ready", func(c *gin.Context) {
// Check dependent services.
if err := checkDatabase(); err != nil {
c.JSON(503, gin.H{"status": "not ready"})
return
}
c.JSON(200, gin.H{"status": "ready"})
})
6.3 优雅关闭
srv := &http.Server{
Addr: ":8080",
Handler: r,
}
// NOTE(review): this snippet omits the `go srv.ListenAndServe()` call
// that must run before blocking on the signal channel.
// Block until SIGINT/SIGTERM arrives.
quit := make(chan os.Signal, 1)
signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
<-quit
// Graceful shutdown: give in-flight requests up to 30s to finish,
// then force-close.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if err := srv.Shutdown(ctx); err != nil {
log.Fatal("Server forced to shutdown:", err)
}
七、总结
Go 微服务核心要点:
| 组件 | 技术选型 | 说明 |
|---|---|---|
| 通信 | gRPC/REST | 内部用 gRPC,对外用 REST |
| 发现 | Consul | 服务注册与发现 |
| 追踪 | OpenTelemetry | 链路追踪 |
| 容错 | 重试/熔断/限流 | 提高可用性 |
| 配置 | Viper | 统一配置管理 |
微服务架构需要平衡复杂性和可扩展性,根据业务规模选择合适的技术方案。