|
107 | 107 | _/binary>>). |
108 | 108 |
|
109 | 109 | -define(SKIP_SEARCH_JUMP, 2048). |
110 | | --define(READ_AHEAD_LIMIT, 4096). |
111 | 110 |
|
112 | 111 | %% Specification of the Log format. |
113 | 112 | %% |
|
%% Read-ahead state used when iterating a segment. When enabled, reads
%% are issued in larger blocks than strictly required so that subsequent
%% entries can be served from the buffered data (see iter_read_ahead).
-record(ra,
        {on = true :: boolean(),
         %% current read-ahead size; starts at the smallest useful read
         %% (chunk header plus the default filter size)
         size = ?HEADER_SIZE_B + ?DEFAULT_FILTER_SIZE :: non_neg_integer(),
         %% buffered data together with the file position it was read from
         buf :: undefined | {Pos :: non_neg_integer(), binary()},
         %% upper bound for read-ahead; record field defaults are evaluated
         %% at record creation, so this snapshots the configured limit when
         %% the #ra{} record is built
         limit = read_ahead_limit() :: pos_integer()
        }).
430 | 430 | -record(read, |
431 | 431 | {type :: data | offset, |
@@ -3335,7 +3335,7 @@ iter_read_ahead(Fd, Pos, MinReqSize, Credit0, DataSize, NumEntries, Ra0) |
3335 | 3335 | %% needed to serve that, else we read up to the readahead |
3336 | 3336 | %% limit but not beyond the end of the chunk and not less |
3337 | 3337 | %% that the minimum request size |
3338 | | - MinSize = max(MinReqSize, min(?READ_AHEAD_LIMIT, DataSize)), |
| 3338 | + MinSize = max(MinReqSize, min(read_ahead_limit(), DataSize)), |
3339 | 3339 | Size = max(MinSize, iter_guess_size(Credit0, NumEntries, |
3340 | 3340 | DataSize)), |
3341 | 3341 | {ok, Data} = file:pread(Fd, Pos, Size), |
@@ -3364,15 +3364,15 @@ ra_read(_Pos, _Len, _Ra) -> |
3364 | 3364 | undefined. |
3365 | 3365 |
|
3366 | 3366 | ra_update_size(undefined, FilterSize, LastDataSize, |
3367 | | - #ra{on = true, size = Sz} = Ra) |
3368 | | - when Sz < ?READ_AHEAD_LIMIT andalso |
3369 | | - LastDataSize =< (?READ_AHEAD_LIMIT - ?HEADER_SIZE_B - |
| 3367 | + #ra{on = true, size = Sz, limit = Limit} = Ra) |
| 3368 | + when Sz < Limit andalso |
| 3369 | + LastDataSize =< (Limit - ?HEADER_SIZE_B - |
3370 | 3370 | FilterSize - ?REC_HDR_SZ_SUBBATCH_B) -> |
3371 | 3371 | %% no filter and last data size was small so enable data read ahead |
3372 | | - Ra#ra{size = ?READ_AHEAD_LIMIT}; |
| 3372 | + Ra#ra{size = Limit}; |
3373 | 3373 | ra_update_size(undefined, FilterSize, LastDataSize, |
3374 | | - #ra{on = true, size = ?READ_AHEAD_LIMIT} = Ra) |
3375 | | - when LastDataSize =< (?READ_AHEAD_LIMIT - ?HEADER_SIZE_B - |
| 3374 | + #ra{on = true, size = Limit, limit = Limit} = Ra) |
| 3375 | + when LastDataSize =< (Limit - ?HEADER_SIZE_B - |
3376 | 3376 | FilterSize - ?REC_HDR_SZ_SUBBATCH_B) -> |
3377 | 3377 | Ra; |
3378 | 3378 | ra_update_size(_Filter, FilterSize, _LastDataSize, #ra{size = Sz} = Ra) -> |
@@ -3434,35 +3434,39 @@ write_in_chunks(ToWrite, MsgsPerChunk, Msg, W0) when ToWrite > 0 -> |
3434 | 3434 | write_in_chunks(_, _, _, W) -> |
3435 | 3435 | W. |
3436 | 3436 |
|
%% Maximum number of bytes to read ahead when iterating a segment.
%% Configurable via the `read_ahead_limit' key in the `osiris'
%% application environment; defaults to 4096 bytes when unset.
read_ahead_limit() ->
    case application:get_env(osiris, read_ahead_limit) of
        {ok, Limit} ->
            Limit;
        undefined ->
            4096
    end.
| 3439 | + |
3437 | 3440 | -ifdef(TEST). |
3438 | 3441 | -include_lib("eunit/include/eunit.hrl"). |
3439 | 3442 |
|
%% Exercises ra_update_size/4: read-ahead should only be enabled for
%% unfiltered reads whose previous chunk was small enough to have fit
%% within the configured limit.
ra_update_size_test() ->
    Limit = read_ahead_limit(),
    Default = ?HEADER_SIZE_B + ?DEFAULT_FILTER_SIZE,
    %% a fresh read-ahead record starts at the default size
    ?assertMatch(#ra{size = Default}, #ra{}),
    Fresh = #ra{},
    %% no filter and a small previous chunk -> size grows to the limit
    ?assertMatch(#ra{size = Limit},
                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE, 100, Fresh)),
    ?assertMatch(#ra{size = Limit},
                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE, 100, Fresh)),

    Expanded = #ra{size = Limit},
    %% a large previous chunk shrinks the size back to the default ...
    ?assertMatch(#ra{size = Default},
                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE, 5000,
                                Expanded)),
    %% ... while a small one keeps it at the limit
    ?assertMatch(#ra{size = Limit},
                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE, 100,
                                Expanded)),

    %% a non-undefined filter leaves the size at the default
    ?assertMatch(#ra{size = Default},
                 ra_update_size("a filter", ?DEFAULT_FILTER_SIZE, 100, Fresh)),

    %% we need to ensure that if we enable read ahead we can at least
    %% fulfil the prior chunk including header, filter and record length
    %% header
    MaxEnable = Limit - ?HEADER_SIZE_B - ?DEFAULT_FILTER_SIZE -
                ?REC_HDR_SZ_SUBBATCH_B,
    ?assertMatch(#ra{size = Default},
                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE,
                                MaxEnable + 1, Fresh)),
    ?assertMatch(#ra{size = Limit},
                 ra_update_size(undefined, ?DEFAULT_FILTER_SIZE,
                                MaxEnable, Fresh)),
    ok.
|
0 commit comments