 package jobs_test
 
 import (
+	"fmt"
 	"sync"
 	"testing"
 	"time"
@@ -252,3 +253,175 @@ func TestProjectCommandOutputHandler(t *testing.T) {
 		assert.True(t, <-opComplete)
 	})
 }
+
+// TestRaceConditionPrevention tests that our fixes prevent the specific race conditions
+func TestRaceConditionPrevention(t *testing.T) {
+	logger := logging.NewNoopLogger(t)
+	prjCmdOutputChan := make(chan *jobs.ProjectCmdOutputLine)
+	handler := jobs.NewAsyncProjectCommandOutputHandler(prjCmdOutputChan, logger)
+
+	// Start the handler
+	go handler.Handle()
+
+	ctx := createTestProjectCmdContext(t)
+	pullInfo := jobs.PullInfo{
+		PullNum:      ctx.Pull.Num,
+		Repo:         ctx.BaseRepo.Name,
+		RepoFullName: ctx.BaseRepo.FullName,
+		ProjectName:  ctx.ProjectName,
+		Path:         ctx.RepoRelDir,
+		Workspace:    ctx.Workspace,
+	}
+
+	t.Run("concurrent pullToJobMapping access", func(t *testing.T) {
+		var wg sync.WaitGroup
+		numGoroutines := 50
+
+		// This test specifically targets the original race condition
+		// that was fixed by using sync.Map for pullToJobMapping
+
+		// Concurrent writers (Handle() method updates the mapping)
+		for i := 0; i < numGoroutines; i++ {
+			wg.Add(1)
+			go func(id int) {
+				defer wg.Done()
+				// Send message which triggers Handle() to update pullToJobMapping
+				handler.Send(ctx, fmt.Sprintf("message-%d", id), false)
+			}(i)
+		}
+
+		// Concurrent readers (GetPullToJobMapping() method reads the mapping)
+		for i := 0; i < numGoroutines; i++ {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// This would race with Handle() before the sync.Map fix
+				mappings := handler.GetPullToJobMapping()
+				_ = mappings
+			}()
+		}
+
+		// Concurrent readers of GetJobIDMapForPull
+		for i := 0; i < numGoroutines; i++ {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// This would also race with Handle() before the fix
+				jobMap := handler.(*jobs.AsyncProjectCommandOutputHandler).GetJobIDMapForPull(pullInfo)
+				_ = jobMap
+			}()
+		}
+
+		wg.Wait()
+	})
+
+	t.Run("concurrent buffer access", func(t *testing.T) {
+		var wg sync.WaitGroup
+		numGoroutines := 30
+
+		// First populate some data
+		handler.Send(ctx, "initial", false)
+		time.Sleep(5 * time.Millisecond)
+
+		// Test the race condition we fixed in GetProjectOutputBuffer
+		for i := 0; i < numGoroutines; i++ {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				// This would race with completeJob() before the RLock fix
+				buffer := handler.(*jobs.AsyncProjectCommandOutputHandler).GetProjectOutputBuffer(ctx.JobID)
+				_ = buffer
+			}()
+		}
+
+		// Concurrent operations that modify the buffer
+		for i := 0; i < numGoroutines; i++ {
+			wg.Add(1)
+			go func(id int) {
+				defer wg.Done()
+				if id%10 == 0 {
+					// Occasionally complete a job to test completeJob() race
+					handler.Send(ctx, "", true)
+				} else {
+					handler.Send(ctx, "test", false)
+				}
+			}(i)
+		}
+
+		wg.Wait()
+	})
+
+	// Clean up
+	close(prjCmdOutputChan)
+}
+
+// TestHighConcurrencyStress performs stress testing with many concurrent operations
+func TestHighConcurrencyStress(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping stress test in short mode")
+	}
+
+	logger := logging.NewNoopLogger(t)
+	prjCmdOutputChan := make(chan *jobs.ProjectCmdOutputLine)
+	handler := jobs.NewAsyncProjectCommandOutputHandler(prjCmdOutputChan, logger)
+
+	// Start the handler
+	go handler.Handle()
+
+	var wg sync.WaitGroup
+	numWorkers := 20
+	operationsPerWorker := 100
+
+	// Multiple workers performing mixed operations
+	wg.Add(numWorkers)
+	for worker := 0; worker < numWorkers; worker++ {
+		go func(workerID int) {
+			defer wg.Done()
+
+			ctx := createTestProjectCmdContext(t)
+			ctx.JobID = fmt.Sprintf("worker-job-%d", workerID)
+			ctx.Pull.Num = workerID
+
+			pullInfo := jobs.PullInfo{
+				PullNum:      ctx.Pull.Num,
+				Repo:         ctx.BaseRepo.Name,
+				RepoFullName: ctx.BaseRepo.FullName,
+				ProjectName:  ctx.ProjectName,
+				Path:         ctx.RepoRelDir,
+				Workspace:    ctx.Workspace,
+			}
+
+			for op := 0; op < operationsPerWorker; op++ {
+				switch op % 6 {
+				case 0:
+					// Send messages
+					handler.Send(ctx, "stress test message", false)
+				case 1:
+					// Read pull to job mapping
+					mappings := handler.GetPullToJobMapping()
+					_ = mappings
+				case 2:
+					// Read job ID map for pull
+					jobMap := handler.(*jobs.AsyncProjectCommandOutputHandler).GetJobIDMapForPull(pullInfo)
+					_ = jobMap
+				case 3:
+					// Read project output buffer
+					buffer := handler.(*jobs.AsyncProjectCommandOutputHandler).GetProjectOutputBuffer(ctx.JobID)
+					_ = buffer
+				case 4:
+					// Read receiver buffer
+					receivers := handler.(*jobs.AsyncProjectCommandOutputHandler).GetReceiverBufferForPull(ctx.JobID)
+					_ = receivers
+				case 5:
+					// Occasional cleanup
+					if op%20 == 0 {
+						handler.CleanUp(pullInfo)
+					}
+				}
+			}
+		}(worker)
+	}
+
+	wg.Wait()
+	close(prjCmdOutputChan)
+}
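
The first subtest's comments refer to the mapping race addressed by keeping pullToJobMapping in a sync.Map. As a point of reference, here is a minimal, self-contained sketch of that reader/writer pattern; the pullKey, mappingHandler, registerJob, and jobIDsForPull names are illustrative only and are not taken from the Atlantis code.

// Illustrative only: a tiny model of the sync.Map pattern the subtest above
// exercises. Handle()-style writers and GetPullToJobMapping()-style readers
// never touch a plain map concurrently, so the race detector stays quiet.
package main

import (
	"fmt"
	"sync"
)

// pullKey is a hypothetical stand-in for jobs.PullInfo.
type pullKey struct {
	repoFullName string
	pullNum      int
}

type mappingHandler struct {
	pullToJobMapping sync.Map // pullKey -> *sync.Map of jobID -> struct{}
}

// registerJob models the write path taken while handling an output line.
func (h *mappingHandler) registerJob(k pullKey, jobID string) {
	jobsForPull, _ := h.pullToJobMapping.LoadOrStore(k, &sync.Map{})
	jobsForPull.(*sync.Map).Store(jobID, struct{}{})
}

// jobIDsForPull models the read path used by status and websocket endpoints.
func (h *mappingHandler) jobIDsForPull(k pullKey) []string {
	var ids []string
	if jobsForPull, ok := h.pullToJobMapping.Load(k); ok {
		jobsForPull.(*sync.Map).Range(func(key, _ interface{}) bool {
			ids = append(ids, key.(string))
			return true
		})
	}
	return ids
}

func main() {
	h := &mappingHandler{}
	k := pullKey{repoFullName: "owner/repo", pullNum: 1}

	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			h.registerJob(k, fmt.Sprintf("job-%d", id)) // concurrent writer
			_ = h.jobIDsForPull(k)                      // concurrent reader
		}(i)
	}
	wg.Wait()

	fmt.Printf("registered %d jobs without a data race\n", len(h.jobIDsForPull(k)))
}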
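
The second subtest's comments refer to an RLock on the buffer read path. The sketch below shows the read/write pairing that kind of fix relies on; bufferStore, appendLine, and the map layout are hypothetical names for illustration, not Atlantis's actual fields.

// Illustrative only: readers of the output buffer take RLock so they can run
// concurrently with each other, while appends and completeJob-style cleanup
// take the exclusive Lock.
package main

import "sync"

type bufferStore struct {
	mu      sync.RWMutex
	buffers map[string][]string // jobID -> buffered output lines
}

func newBufferStore() *bufferStore {
	return &bufferStore{buffers: map[string][]string{}}
}

// getProjectOutputBuffer models the read path the subtest hammers.
func (s *bufferStore) getProjectOutputBuffer(jobID string) []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// Return a copy so callers cannot race on the underlying slice.
	return append([]string(nil), s.buffers[jobID]...)
}

// appendLine and completeJob model the write paths.
func (s *bufferStore) appendLine(jobID, line string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.buffers[jobID] = append(s.buffers[jobID], line)
}

func (s *bufferStore) completeJob(jobID string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.buffers, jobID)
}

func main() {
	s := newBufferStore()
	var wg sync.WaitGroup
	for i := 0; i < 30; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			s.appendLine("job-1", "line")
			_ = s.getProjectOutputBuffer("job-1")
			if id%10 == 0 {
				s.completeJob("job-1")
			}
		}(i)
	}
	wg.Wait()
}

Like the tests in this commit, these sketches are only meaningful with the race detector enabled, e.g. go test -race for the test package or go run -race for the standalone programs.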