// filechunks_test.go
  1. package filer
  2. import (
  3. "context"
  4. "fmt"
  5. "log"
  6. "math"
  7. "math/rand/v2"
  8. "strconv"
  9. "testing"
  10. "github.com/stretchr/testify/assert"
  11. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  12. )
  13. func TestCompactFileChunks(t *testing.T) {
  14. chunks := []*filer_pb.FileChunk{
  15. {Offset: 10, Size: 100, FileId: "abc", ModifiedTsNs: 50},
  16. {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100},
  17. {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200},
  18. {Offset: 110, Size: 200, FileId: "jkl", ModifiedTsNs: 300},
  19. }
  20. compacted, garbage := CompactFileChunks(context.Background(), nil, chunks)
  21. if len(compacted) != 3 {
  22. t.Fatalf("unexpected compacted: %d", len(compacted))
  23. }
  24. if len(garbage) != 1 {
  25. t.Fatalf("unexpected garbage: %d", len(garbage))
  26. }
  27. }
  28. func TestCompactFileChunks2(t *testing.T) {
  29. chunks := []*filer_pb.FileChunk{
  30. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50},
  31. {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100},
  32. {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200},
  33. {Offset: 0, Size: 100, FileId: "abcf", ModifiedTsNs: 300},
  34. {Offset: 50, Size: 100, FileId: "fhfh", ModifiedTsNs: 400},
  35. {Offset: 100, Size: 100, FileId: "yuyu", ModifiedTsNs: 500},
  36. }
  37. k := 3
  38. for n := 0; n < k; n++ {
  39. chunks = append(chunks, &filer_pb.FileChunk{
  40. Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n),
  41. })
  42. chunks = append(chunks, &filer_pb.FileChunk{
  43. Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k),
  44. })
  45. }
  46. compacted, garbage := CompactFileChunks(context.Background(), nil, chunks)
  47. if len(compacted) != 4 {
  48. t.Fatalf("unexpected compacted: %d", len(compacted))
  49. }
  50. if len(garbage) != 8 {
  51. t.Fatalf("unexpected garbage: %d", len(garbage))
  52. }
  53. }
  54. func TestRandomFileChunksCompact(t *testing.T) {
  55. data := make([]byte, 1024)
  56. var chunks []*filer_pb.FileChunk
  57. for i := 0; i < 15; i++ {
  58. start, stop := rand.IntN(len(data)), rand.IntN(len(data))
  59. if start > stop {
  60. start, stop = stop, start
  61. }
  62. if start+16 < stop {
  63. stop = start + 16
  64. }
  65. chunk := &filer_pb.FileChunk{
  66. FileId: strconv.Itoa(i),
  67. Offset: int64(start),
  68. Size: uint64(stop - start),
  69. ModifiedTsNs: int64(i),
  70. Fid: &filer_pb.FileId{FileKey: uint64(i)},
  71. }
  72. chunks = append(chunks, chunk)
  73. for x := start; x < stop; x++ {
  74. data[x] = byte(i)
  75. }
  76. }
  77. visibles, _ := NonOverlappingVisibleIntervals(context.Background(), nil, chunks, 0, math.MaxInt64)
  78. for visible := visibles.Front(); visible != nil; visible = visible.Next {
  79. v := visible.Value
  80. for x := v.start; x < v.stop; x++ {
  81. assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId)
  82. }
  83. }
  84. }
// TestIntervalMerging feeds overlapping chunk lists through
// NonOverlappingVisibleIntervals and compares the resulting visible
// intervals (start, stop, fileId, offsetInChunk) against hand-computed
// expectations. Where chunks overlap, the one with the later ModifiedTsNs
// is expected to win.
func TestIntervalMerging(t *testing.T) {

	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Expected []*VisibleInterval
	}{
		// case 0: normal
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
				{Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
				{Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
				{start: 100, stop: 200, fileId: "asdf"},
				{start: 200, stop: 300, fileId: "fsad"},
			},
		},
		// case 1: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
				{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
			},
		},
		// case 2: updates overwrite part of previous chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
				{Offset: 0, Size: 70, FileId: "b", ModifiedTsNs: 134},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 70, fileId: "b"},
				// the tail of "a" remains visible, offset 70 into chunk "a"
				{start: 70, stop: 100, fileId: "a", offsetInChunk: 70},
			},
		},
		// case 3: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
				{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 300, fileId: "xxxx"},
			},
		},
		// case 4: updates far away from prev chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
				{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 250, stop: 500, fileId: "xxxx"},
			},
		},
		// case 5: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
				{Offset: 0, Size: 200, FileId: "d", ModifiedTsNs: 184},
				{Offset: 70, Size: 150, FileId: "c", ModifiedTsNs: 143},
				{Offset: 80, Size: 100, FileId: "b", ModifiedTsNs: 134},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 200, fileId: "d"},
				{start: 200, stop: 220, fileId: "c", offsetInChunk: 130},
			},
		},
		// case 6: same updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
				{Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124},
				{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125},
			},
			Expected: []*VisibleInterval{
				// only the newest write of the identical range survives
				{start: 0, stop: 100, fileId: "xyz"},
			},
		},
		// case 7: real updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", ModifiedTsNs: 123},
				{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", ModifiedTsNs: 130},
				{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", ModifiedTsNs: 140},
				{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", ModifiedTsNs: 150},
				{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", ModifiedTsNs: 160},
				{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", ModifiedTsNs: 170},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 2097152, fileId: "3,029565bf3092"},
				{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
				{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
				{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
				{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
			},
		},
		// case 8: real bug
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 77824, FileId: "4,0b3df938e301", ModifiedTsNs: 123},
				{Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", ModifiedTsNs: 130},
				{Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", ModifiedTsNs: 140},
				{Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", ModifiedTsNs: 150},
				{Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", ModifiedTsNs: 160},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 77824, fileId: "4,0b3df938e301"},
				{start: 77824, stop: 208896, fileId: "4,0b3f0c7202f0"},
				{start: 208896, stop: 339968, fileId: "2,0b4031a72689"},
				{start: 339968, stop: 471040, fileId: "3,0b416a557362"},
				{start: 471040, stop: 472225, fileId: "6,0b3e0650019c"},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
		intervals, _ := NonOverlappingVisibleIntervals(context.Background(), nil, testcase.Chunks, 0, math.MaxInt64)
		x := -1
		// First pass: log every merged interval for debugging.
		for visible := intervals.Front(); visible != nil; visible = visible.Next {
			x++
			interval := visible.Value
			log.Printf("test case %d, interval start=%d, stop=%d, fileId=%s",
				i, interval.start, interval.stop, interval.fileId)
		}
		x = -1
		// Second pass: compare each interval field-by-field with the expectation.
		for visible := intervals.Front(); visible != nil; visible = visible.Next {
			x++
			interval := visible.Value
			if interval.start != testcase.Expected[x].start {
				t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
					i, x, interval.start, testcase.Expected[x].start)
			}
			if interval.stop != testcase.Expected[x].stop {
				t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
					i, x, interval.stop, testcase.Expected[x].stop)
			}
			if interval.fileId != testcase.Expected[x].fileId {
				t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
					i, x, interval.fileId, testcase.Expected[x].fileId)
			}
			if interval.offsetInChunk != testcase.Expected[x].offsetInChunk {
				t.Fatalf("failed on test case %d, interval %d, offsetInChunk %d, expect %d",
					i, x, interval.offsetInChunk, testcase.Expected[x].offsetInChunk)
			}
		}
		// The interval count must also match exactly.
		if intervals.Len() != len(testcase.Expected) {
			t.Fatalf("failed to compact test case %d, len %d expected %d", i, intervals.Len(), len(testcase.Expected))
		}
	}
}
  244. func TestChunksReading(t *testing.T) {
  245. testcases := []struct {
  246. Chunks []*filer_pb.FileChunk
  247. Offset int64
  248. Size int64
  249. Expected []*ChunkView
  250. }{
  251. // case 0: normal
  252. {
  253. Chunks: []*filer_pb.FileChunk{
  254. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  255. {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
  256. {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
  257. },
  258. Offset: 0,
  259. Size: 250,
  260. Expected: []*ChunkView{
  261. {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0},
  262. {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100},
  263. {OffsetInChunk: 0, ViewSize: 50, FileId: "fsad", ViewOffset: 200},
  264. },
  265. },
  266. // case 1: updates overwrite full chunks
  267. {
  268. Chunks: []*filer_pb.FileChunk{
  269. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  270. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  271. },
  272. Offset: 50,
  273. Size: 100,
  274. Expected: []*ChunkView{
  275. {OffsetInChunk: 50, ViewSize: 100, FileId: "asdf", ViewOffset: 50},
  276. },
  277. },
  278. // case 2: updates overwrite part of previous chunks
  279. {
  280. Chunks: []*filer_pb.FileChunk{
  281. {Offset: 3, Size: 100, FileId: "a", ModifiedTsNs: 123},
  282. {Offset: 10, Size: 50, FileId: "b", ModifiedTsNs: 134},
  283. },
  284. Offset: 30,
  285. Size: 40,
  286. Expected: []*ChunkView{
  287. {OffsetInChunk: 20, ViewSize: 30, FileId: "b", ViewOffset: 30},
  288. {OffsetInChunk: 57, ViewSize: 10, FileId: "a", ViewOffset: 60},
  289. },
  290. },
  291. // case 3: updates overwrite full chunks
  292. {
  293. Chunks: []*filer_pb.FileChunk{
  294. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  295. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  296. {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
  297. },
  298. Offset: 0,
  299. Size: 200,
  300. Expected: []*ChunkView{
  301. {OffsetInChunk: 0, ViewSize: 50, FileId: "asdf", ViewOffset: 0},
  302. {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 50},
  303. },
  304. },
  305. // case 4: updates far away from prev chunks
  306. {
  307. Chunks: []*filer_pb.FileChunk{
  308. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  309. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  310. {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
  311. },
  312. Offset: 0,
  313. Size: 400,
  314. Expected: []*ChunkView{
  315. {OffsetInChunk: 0, ViewSize: 200, FileId: "asdf", ViewOffset: 0},
  316. {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 250},
  317. },
  318. },
  319. // case 5: updates overwrite full chunks
  320. {
  321. Chunks: []*filer_pb.FileChunk{
  322. {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
  323. {Offset: 0, Size: 200, FileId: "c", ModifiedTsNs: 184},
  324. {Offset: 70, Size: 150, FileId: "b", ModifiedTsNs: 143},
  325. {Offset: 80, Size: 100, FileId: "xxxx", ModifiedTsNs: 134},
  326. },
  327. Offset: 0,
  328. Size: 220,
  329. Expected: []*ChunkView{
  330. {OffsetInChunk: 0, ViewSize: 200, FileId: "c", ViewOffset: 0},
  331. {OffsetInChunk: 130, ViewSize: 20, FileId: "b", ViewOffset: 200},
  332. },
  333. },
  334. // case 6: same updates
  335. {
  336. Chunks: []*filer_pb.FileChunk{
  337. {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
  338. {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124},
  339. {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125},
  340. },
  341. Offset: 0,
  342. Size: 100,
  343. Expected: []*ChunkView{
  344. {OffsetInChunk: 0, ViewSize: 100, FileId: "xyz", ViewOffset: 0},
  345. },
  346. },
  347. // case 7: edge cases
  348. {
  349. Chunks: []*filer_pb.FileChunk{
  350. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  351. {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
  352. {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
  353. },
  354. Offset: 0,
  355. Size: 200,
  356. Expected: []*ChunkView{
  357. {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0},
  358. {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100},
  359. },
  360. },
  361. // case 8: edge cases
  362. {
  363. Chunks: []*filer_pb.FileChunk{
  364. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  365. {Offset: 90, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  366. {Offset: 190, Size: 300, FileId: "fsad", ModifiedTsNs: 353},
  367. },
  368. Offset: 0,
  369. Size: 300,
  370. Expected: []*ChunkView{
  371. {OffsetInChunk: 0, ViewSize: 90, FileId: "abc", ViewOffset: 0},
  372. {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 90},
  373. {OffsetInChunk: 0, ViewSize: 110, FileId: "fsad", ViewOffset: 190},
  374. },
  375. },
  376. // case 9: edge cases
  377. {
  378. Chunks: []*filer_pb.FileChunk{
  379. {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", ModifiedTsNs: 1},
  380. {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", ModifiedTsNs: 2},
  381. {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", ModifiedTsNs: 3},
  382. {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", ModifiedTsNs: 4},
  383. {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", ModifiedTsNs: 5},
  384. {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", ModifiedTsNs: 6},
  385. },
  386. Offset: 0,
  387. Size: 153578836,
  388. Expected: []*ChunkView{
  389. {OffsetInChunk: 0, ViewSize: 43175936, FileId: "2,111fc2cbfac1", ViewOffset: 0},
  390. {OffsetInChunk: 0, ViewSize: 52981760 - 43175936, FileId: "2,112a36ea7f85", ViewOffset: 43175936},
  391. {OffsetInChunk: 0, ViewSize: 72564736 - 52981760, FileId: "4,112d5f31c5e7", ViewOffset: 52981760},
  392. {OffsetInChunk: 0, ViewSize: 133255168 - 72564736, FileId: "1,113245f0cdb6", ViewOffset: 72564736},
  393. {OffsetInChunk: 0, ViewSize: 137269248 - 133255168, FileId: "3,1141a70733b5", ViewOffset: 133255168},
  394. {OffsetInChunk: 0, ViewSize: 153578836 - 137269248, FileId: "1,114201d5bbdb", ViewOffset: 137269248},
  395. },
  396. },
  397. }
  398. for i, testcase := range testcases {
  399. if i != 2 {
  400. // continue
  401. }
  402. log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
  403. chunks := ViewFromChunks(context.Background(), nil, testcase.Chunks, testcase.Offset, testcase.Size)
  404. x := -1
  405. for c := chunks.Front(); c != nil; c = c.Next {
  406. x++
  407. chunk := c.Value
  408. log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
  409. i, x, chunk.OffsetInChunk, chunk.ViewSize, chunk.FileId)
  410. if chunk.OffsetInChunk != testcase.Expected[x].OffsetInChunk {
  411. t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d",
  412. i, chunk.FileId, chunk.OffsetInChunk, testcase.Expected[x].OffsetInChunk)
  413. }
  414. if chunk.ViewSize != testcase.Expected[x].ViewSize {
  415. t.Fatalf("failed on read case %d, chunk %s, ViewSize %d, expect %d",
  416. i, chunk.FileId, chunk.ViewSize, testcase.Expected[x].ViewSize)
  417. }
  418. if chunk.FileId != testcase.Expected[x].FileId {
  419. t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
  420. i, x, chunk.FileId, testcase.Expected[x].FileId)
  421. }
  422. if chunk.ViewOffset != testcase.Expected[x].ViewOffset {
  423. t.Fatalf("failed on read case %d, chunk %d, ViewOffset %d, expect %d",
  424. i, x, chunk.ViewOffset, testcase.Expected[x].ViewOffset)
  425. }
  426. }
  427. if chunks.Len() != len(testcase.Expected) {
  428. t.Fatalf("failed to read test case %d, len %d expected %d", i, chunks.Len(), len(testcase.Expected))
  429. }
  430. }
  431. }
  432. func BenchmarkCompactFileChunks(b *testing.B) {
  433. var chunks []*filer_pb.FileChunk
  434. k := 1024
  435. for n := 0; n < k; n++ {
  436. chunks = append(chunks, &filer_pb.FileChunk{
  437. Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n),
  438. })
  439. chunks = append(chunks, &filer_pb.FileChunk{
  440. Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k),
  441. })
  442. }
  443. for n := 0; n < b.N; n++ {
  444. CompactFileChunks(context.Background(), nil, chunks)
  445. }
  446. }
  447. func addVisibleInterval(visibles *IntervalList[*VisibleInterval], x *VisibleInterval) {
  448. visibles.AppendInterval(&Interval[*VisibleInterval]{
  449. StartOffset: x.start,
  450. StopOffset: x.stop,
  451. TsNs: x.modifiedTsNs,
  452. Value: x,
  453. })
  454. }
  455. func TestViewFromVisibleIntervals(t *testing.T) {
  456. visibles := NewIntervalList[*VisibleInterval]()
  457. addVisibleInterval(visibles, &VisibleInterval{
  458. start: 0,
  459. stop: 25,
  460. fileId: "fid1",
  461. })
  462. addVisibleInterval(visibles, &VisibleInterval{
  463. start: 4096,
  464. stop: 8192,
  465. fileId: "fid2",
  466. })
  467. addVisibleInterval(visibles, &VisibleInterval{
  468. start: 16384,
  469. stop: 18551,
  470. fileId: "fid3",
  471. })
  472. views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
  473. if views.Len() != visibles.Len() {
  474. assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
  475. }
  476. }
  477. func TestViewFromVisibleIntervals2(t *testing.T) {
  478. visibles := NewIntervalList[*VisibleInterval]()
  479. addVisibleInterval(visibles, &VisibleInterval{
  480. start: 344064,
  481. stop: 348160,
  482. fileId: "fid1",
  483. })
  484. addVisibleInterval(visibles, &VisibleInterval{
  485. start: 348160,
  486. stop: 356352,
  487. fileId: "fid2",
  488. })
  489. views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
  490. if views.Len() != visibles.Len() {
  491. assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
  492. }
  493. }
  494. func TestViewFromVisibleIntervals3(t *testing.T) {
  495. visibles := NewIntervalList[*VisibleInterval]()
  496. addVisibleInterval(visibles, &VisibleInterval{
  497. start: 1000,
  498. stop: 2000,
  499. fileId: "fid1",
  500. })
  501. addVisibleInterval(visibles, &VisibleInterval{
  502. start: 3000,
  503. stop: 4000,
  504. fileId: "fid2",
  505. })
  506. views := ViewFromVisibleIntervals(visibles, 1700, 1500)
  507. if views.Len() != visibles.Len() {
  508. assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
  509. }
  510. }
  511. func TestCompactFileChunks3(t *testing.T) {
  512. chunks := []*filer_pb.FileChunk{
  513. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50},
  514. {Offset: 100, Size: 100, FileId: "ghi", ModifiedTsNs: 50},
  515. {Offset: 200, Size: 100, FileId: "jkl", ModifiedTsNs: 100},
  516. {Offset: 300, Size: 100, FileId: "def", ModifiedTsNs: 200},
  517. }
  518. compacted, _ := CompactFileChunks(context.Background(), nil, chunks)
  519. if len(compacted) != 4 {
  520. t.Fatalf("unexpected compacted: %d", len(compacted))
  521. }
  522. }