
Commit aa1f970

fix linter issues

1 parent 908d602
3 files changed, +19 -59 lines changed

maintnotifications/e2e/cmd/proxy-fi-server/Dockerfile

Lines changed: 10 additions & 0 deletions

@@ -19,11 +19,21 @@ FROM alpine:latest
 
 RUN apk --no-cache add ca-certificates
 
+# Create a non-root user
+RUN addgroup -g 1000 appuser && \
+    adduser -D -u 1000 -G appuser appuser
+
 WORKDIR /app
 
 # Copy the binary from builder
 COPY --from=builder /proxy-fi-server .
 
+# Change ownership of the app directory
+RUN chown -R appuser:appuser /app
+
+# Switch to non-root user
+USER appuser
+
 # Expose the fault injector API port
 EXPOSE 5000
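
The image now runs the server as the unprivileged appuser (UID 1000) instead of root. A complementary runtime guard could catch deployments that override the USER directive; a minimal sketch, assuming a hypothetical warnIfRoot helper in the proxy-fi-server main package (not part of this commit):

package main

import (
    "log"
    "os"
)

// warnIfRoot logs a warning when the effective UID is 0, i.e. the
// container was started as root instead of the appuser created above.
// Hypothetical helper for illustration; the commit only changes the Dockerfile.
func warnIfRoot() {
    if os.Geteuid() == 0 {
        log.Println("warning: running as root; image is expected to run as appuser (UID 1000)")
    }
}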

maintnotifications/e2e/notiftracker.go

Lines changed: 0 additions & 56 deletions

@@ -249,62 +249,6 @@ func (tnh *TrackingNotificationsHook) increaseRelaxedTimeoutCount(notificationType
 	}
 }
 
-// setupNotificationHook sets up tracking for both regular and cluster clients with notification hooks
-func setupNotificationHook(client redis.UniversalClient, hook maintnotifications.NotificationHook) {
-	if clusterClient, ok := client.(*redis.ClusterClient); ok {
-		setupClusterClientNotificationHook(clusterClient, hook)
-	} else if regularClient, ok := client.(*redis.Client); ok {
-		setupRegularClientNotificationHook(regularClient, hook)
-	}
-}
-
-// setupNotificationHooks sets up tracking for both regular and cluster clients with notification hooks
-func setupNotificationHooks(client redis.UniversalClient, hooks ...maintnotifications.NotificationHook) {
-	for _, hook := range hooks {
-		setupNotificationHook(client, hook)
-	}
-}
-
-// setupRegularClientNotificationHook sets up notification hook for regular clients
-func setupRegularClientNotificationHook(client *redis.Client, hook maintnotifications.NotificationHook) {
-	maintnotificationsManager := client.GetMaintNotificationsManager()
-	if maintnotificationsManager != nil {
-		maintnotificationsManager.AddNotificationHook(hook)
-	} else {
-		fmt.Printf("[TNH] Warning: Maintenance notifications manager not available for tracking\n")
-	}
-}
-
-// setupClusterClientNotificationHook sets up notification hook for cluster clients
-func setupClusterClientNotificationHook(client *redis.ClusterClient, hook maintnotifications.NotificationHook) {
-	ctx := context.Background()
-
-	// Register hook on existing nodes
-	err := client.ForEachShard(ctx, func(ctx context.Context, nodeClient *redis.Client) error {
-		maintnotificationsManager := nodeClient.GetMaintNotificationsManager()
-		if maintnotificationsManager != nil {
-			maintnotificationsManager.AddNotificationHook(hook)
-		} else {
-			fmt.Printf("[TNH] Warning: Maintenance notifications manager not available for tracking on node: %s\n", nodeClient.Options().Addr)
-		}
-		return nil
-	})
-
-	if err != nil {
-		fmt.Printf("[TNH] Warning: Failed to register timeout tracking hooks on existing cluster nodes: %v\n", err)
-	}
-
-	// Register hook on new nodes
-	client.OnNewNode(func(nodeClient *redis.Client) {
-		maintnotificationsManager := nodeClient.GetMaintNotificationsManager()
-		if maintnotificationsManager != nil {
-			maintnotificationsManager.AddNotificationHook(hook)
-		} else {
-			fmt.Printf("[TNH] Warning: Maintenance notifications manager not available for tracking on new node: %s\n", nodeClient.Options().Addr)
-		}
-	})
-}
-
 func (tnh *TrackingNotificationsHook) GetAnalysis() *DiagnosticsAnalysis {
 	return NewDiagnosticsAnalysis(tnh.GetDiagnosticsLog())
 }
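
The deleted helpers were apparently unused, which matches the commit message about linter fixes. For reference, a caller that still needs the behavior can register a hook inline; a minimal sketch based on the removed setupRegularClientNotificationHook, assuming the same redis and maintnotifications imports as notiftracker.go (registerHook is a hypothetical name, not part of this commit):

// registerHook mirrors the removed helper: attach the hook on a regular
// client if its maintenance notifications manager is available.
func registerHook(client *redis.Client, hook maintnotifications.NotificationHook) {
    if manager := client.GetMaintNotificationsManager(); manager != nil {
        manager.AddNotificationHook(hook)
    }
}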

maintnotifications/e2e/proxy_fault_injector_server.go

Lines changed: 9 additions & 3 deletions

@@ -119,7 +119,9 @@ func (s *ProxyFaultInjectorServer) Start() error {
 
 	targetPort := 6379
 	if portStr := os.Getenv("REDIS_TARGET_PORT"); portStr != "" {
-		fmt.Sscanf(portStr, "%d", &targetPort)
+		if _, err := fmt.Sscanf(portStr, "%d", &targetPort); err != nil {
+			return fmt.Errorf("invalid REDIS_TARGET_PORT: %w", err)
+		}
 	}
 
 	// Parse cluster addresses
@@ -130,7 +132,9 @@ func (s *ProxyFaultInjectorServer) Start() error {
 
 	// Extract first port for initial node
 	var initialPort int
-	fmt.Sscanf(strings.Split(addrs[0], ":")[1], "%d", &initialPort)
+	if _, err := fmt.Sscanf(strings.Split(addrs[0], ":")[1], "%d", &initialPort); err != nil {
+		return fmt.Errorf("invalid port in cluster address %s: %w", addrs[0], err)
+	}
 
 	// Check if proxy is already running (e.g., in Docker)
 	proxyAlreadyRunning := false
@@ -181,7 +185,9 @@ func (s *ProxyFaultInjectorServer) Start() error {
 	if !proxyAlreadyRunning {
 		for i := 1; i < len(addrs); i++ {
 			var port int
-			fmt.Sscanf(strings.Split(addrs[i], ":")[1], "%d", &port)
+			if _, err := fmt.Sscanf(strings.Split(addrs[i], ":")[1], "%d", &port); err != nil {
+				return fmt.Errorf("invalid port in cluster address %s: %w", addrs[i], err)
+			}
 			if err := s.addProxyNode(port, targetPort, targetHost); err != nil {
 				return fmt.Errorf("failed to add node %d: %w", i, err)
 			}
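
All three fmt.Sscanf calls now propagate parse errors instead of silently leaving the port at its zero (or default) value. Note that strings.Split(addr, ":")[1] still panics when the address has no colon; a sketch of a stricter alternative using net.SplitHostPort and strconv.Atoi (parsePort is a hypothetical helper, not what this commit does):

package e2e

import (
    "fmt"
    "net"
    "strconv"
)

// parsePort extracts and parses the port from a "host:port" address,
// returning an error rather than panicking when the ":" is missing.
// Hypothetical helper for illustration; not part of this commit.
func parsePort(addr string) (int, error) {
    _, portStr, err := net.SplitHostPort(addr)
    if err != nil {
        return 0, fmt.Errorf("invalid cluster address %s: %w", addr, err)
    }
    port, err := strconv.Atoi(portStr)
    if err != nil {
        return 0, fmt.Errorf("invalid port in cluster address %s: %w", addr, err)
    }
    return port, nil
}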
