|
#include <chrono>
#include <ctime>
#include <random>
#include <sstream>
#include <thread>
9 | 10 |
|
10 | 11 | #include "rocksdb/env.h" |
11 | 12 |
|
@@ -196,15 +197,35 @@ Status Redis::SetMaxCacheStatisticKeys(size_t max_cache_statistic_keys) { |
196 | 197 |
|
197 | 198 | /* |
198 | 199 | * compactrange no longer supports compact for a single data type |
| 200 | + * |
| 201 | + * 魔改版本:添加延迟放大并发竞争窗口,用于复现 SST 损坏问题 |
| 202 | + * 注意:此修改仅用于测试环境,生产环境请勿使用 |
199 | 203 | */ |
200 | 204 | Status Redis::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end) { |
| 205 | + // 随机延迟 0-50ms,让 7 个 CF 的启动时间错开但仍重叠 |
| 206 | + std::this_thread::sleep_for(std::chrono::milliseconds(rand() % 50)); |
| 207 | + |
201 | 208 | db_->CompactRange(default_compact_range_options_, begin, end); |
| 209 | + |
| 210 | + // 每个 CF 之间固定延迟 20ms,增加并发重叠度 |
| 211 | + std::this_thread::sleep_for(std::chrono::milliseconds(20)); |
202 | 212 | db_->CompactRange(default_compact_range_options_, handles_[kHashesDataCF], begin, end); |
| 213 | + |
| 214 | + std::this_thread::sleep_for(std::chrono::milliseconds(20)); |
203 | 215 | db_->CompactRange(default_compact_range_options_, handles_[kSetsDataCF], begin, end); |
| 216 | + |
| 217 | + std::this_thread::sleep_for(std::chrono::milliseconds(20)); |
204 | 218 | db_->CompactRange(default_compact_range_options_, handles_[kListsDataCF], begin, end); |
| 219 | + |
| 220 | + std::this_thread::sleep_for(std::chrono::milliseconds(20)); |
205 | 221 | db_->CompactRange(default_compact_range_options_, handles_[kZsetsDataCF], begin, end); |
| 222 | + |
| 223 | + std::this_thread::sleep_for(std::chrono::milliseconds(20)); |
206 | 224 | db_->CompactRange(default_compact_range_options_, handles_[kZsetsScoreCF], begin, end); |
| 225 | + |
| 226 | + std::this_thread::sleep_for(std::chrono::milliseconds(20)); |
207 | 227 | db_->CompactRange(default_compact_range_options_, handles_[kStreamsDataCF], begin, end); |
| 228 | + |
208 | 229 | return Status::OK(); |
209 | 230 | } |
210 | 231 |
|
@@ -312,18 +333,13 @@ Status Redis::LongestNotCompactionSstCompact(const DataType& option_type, std::v |
312 | 333 | // clear deleted sst file records because we use them in different cf |
313 | 334 | listener_.Clear(); |
314 | 335 |
|
315 | | - // The main goal of compaction was reclaimed the disk space and removed |
316 | | - // the tombstone. It seems that compaction scheduler was unnecessary here when |
317 | | - // the live files was too few, Hard code to 1 here. |
318 | | - if (props.size() <= 1) { |
319 | | - // LOG(WARNING) << "LongestNotCompactionSstCompact " << handles_[idx]->GetName() << " only one file"; |
320 | | - if (compact_result_vec) { |
321 | | - compact_result_vec->push_back(Status::OK()); |
322 | | - } |
323 | | - continue; |
324 | | - } |
| 336 | + // Modified: removed the minimum-file-count guard; compact whenever any files exist |
| 337 | + // Original code: if (props.size() <= 1) { ... continue; } |
| 338 | + // Now: that early-out is dropped so compaction runs more aggressively |
325 | 339 |
|
326 | | - size_t max_files_to_compact = 1; |
| 340 | + // size_t max_files_to_compact = 1; |
| 341 | + // Modified: process 5 files per pass to raise the number of concurrent compaction tasks |
| 342 | + size_t max_files_to_compact = 5; |
327 | 343 | const StorageOptions& storageOptions = storage_->GetStorageOptions(); |
328 | 344 | if (props.size() / storageOptions.compact_param_.compact_every_num_of_files_ > max_files_to_compact) { |
329 | 345 | max_files_to_compact = props.size() / storageOptions.compact_param_.compact_every_num_of_files_; |
|
0 commit comments