command_volume_check_disk.go

package shell

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"math"
	"net/http"
	"slices"
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/operation"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/server/constants"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
	"google.golang.org/grpc"
)

func init() {
	Commands = append(Commands, &commandVolumeCheckDisk{})
}

type commandVolumeCheckDisk struct {
	env    *CommandEnv
	writer io.Writer
}

func (c *commandVolumeCheckDisk) Name() string {
	return "volume.check.disk"
}

func (c *commandVolumeCheckDisk) Help() string {
	return `check all replicated volumes to find and fix inconsistencies. It is optional and resource intensive.

	How it works:

	find all volumes that are replicated
	for each volume id, if there are more than 2 replicas, pick the pair with the 2 largest file counts
	for that pair of volumes A and B
		append entries that are in A but not in B to B
		append entries that are in B but not in A to A
`
}

func (c *commandVolumeCheckDisk) HasTag(tag CommandTag) bool {
	return tag == ResourceHeavy
}

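// getVolumeStatusFileCount queries one data node's volume server for a volume's
// file count and deleted file count, logging any error to the command writer.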
func (c *commandVolumeCheckDisk) getVolumeStatusFileCount(vid uint32, dn *master_pb.DataNodeInfo) (totalFileCount, deletedFileCount uint64) {
	err := operation.WithVolumeServerClient(false, pb.NewServerAddressWithGrpcPort(dn.Id, int(dn.GrpcPort)), c.env.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		resp, reqErr := volumeServerClient.VolumeStatus(context.Background(), &volume_server_pb.VolumeStatusRequest{
			VolumeId: vid,
		})
		if resp != nil {
			totalFileCount = resp.FileCount
			deletedFileCount = resp.FileDeletedCount
		}
		return reqErr
	})
	if err != nil {
		fmt.Fprintf(c.writer, "getting number of files for volume id %d from volume status: %+v\n", vid, err)
	}
	return totalFileCount, deletedFileCount
}

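// eqVolumeFileCount fetches the live counts of two replicas concurrently and
// reports whether their file counts match and whether their deleted-file
// counts match.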
func (c *commandVolumeCheckDisk) eqVolumeFileCount(a, b *VolumeReplica) (bool, bool) {
	var waitGroup sync.WaitGroup
	var fileCountA, fileCountB, fileDeletedCountA, fileDeletedCountB uint64
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		fileCountA, fileDeletedCountA = c.getVolumeStatusFileCount(a.info.Id, a.location.dataNode)
	}()
	waitGroup.Add(1)
	go func() {
		defer waitGroup.Done()
		fileCountB, fileDeletedCountB = c.getVolumeStatusFileCount(b.info.Id, b.location.dataNode)
	}()
	// query the two volume servers concurrently and wait for both calls to finish
	waitGroup.Wait()
	return fileCountA == fileCountB, fileDeletedCountA == fileDeletedCountB
}

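// shouldSkipVolume reports whether a replica pair can be skipped: either the
// replica counts already agree, or the volume is still actively written
// (modified within the last pulse window) and a live re-check of both volume
// servers shows matching counts.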
func (c *commandVolumeCheckDisk) shouldSkipVolume(a, b *VolumeReplica, pulseTimeAtSecond int64, syncDeletions, verbose bool) bool {
	doSyncDeletedCount := false
	if syncDeletions && a.info.DeleteCount != b.info.DeleteCount {
		doSyncDeletedCount = true
	}
	if (a.info.FileCount != b.info.FileCount) || doSyncDeletedCount {
		// sync the volumes if the modification time was before the last pulse time
		if a.info.ModifiedAtSecond < pulseTimeAtSecond || b.info.ModifiedAtSecond < pulseTimeAtSecond {
			return false
		}
		if eqFileCount, eqDeletedFileCount := c.eqVolumeFileCount(a, b); eqFileCount {
			if doSyncDeletedCount && !eqDeletedFileCount {
				return false
			}
			if verbose {
				fmt.Fprintf(c.writer, "skipping active volume %d with the same file counts on %s and %s\n",
					a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
			}
		} else {
			return false
		}
	}
	return true
}

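// Do parses the command flags and walks every replicated volume, syncing each
// writable replica pair. A minimal invocation sketch (the volume id below is
// hypothetical; without -force the command only reports differences):
//
//	volume.check.disk -v -volumeId=27 -syncDeleted -force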
func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
	fsckCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
	slowMode := fsckCommand.Bool("slow", false, "slow mode checks all replicas even if the file counts are the same")
	verbose := fsckCommand.Bool("v", false, "verbose mode")
	volumeId := fsckCommand.Uint("volumeId", 0, "the volume id")
	applyChanges := fsckCommand.Bool("force", false, "apply the fix")
	syncDeletions := fsckCommand.Bool("syncDeleted", false, "also sync deletions when applying the fix")
	nonRepairThreshold := fsckCommand.Float64("nonRepairThreshold", 0.3, "repair only when the fraction of missing keys is not more than this limit")
	if err = fsckCommand.Parse(args); err != nil {
		return nil
	}
	infoAboutSimulationMode(writer, *applyChanges, "-force")
	if err = commandEnv.confirmIsLocked(args); err != nil {
		return
	}
	c.env = commandEnv
	c.writer = writer

	// collect topology information
	pulseTimeAtSecond := time.Now().Unix() - constants.VolumePulseSeconds*2
	topologyInfo, _, err := collectTopologyInfo(commandEnv, 0)
	if err != nil {
		return err
	}
	volumeReplicas, _ := collectVolumeReplicaLocations(topologyInfo)

	// pick one pair of volume replicas at a time
	for _, replicas := range volumeReplicas {
		if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
			continue
		}
		// filter out read-only replicas
		var writableReplicas []*VolumeReplica
		for _, replica := range replicas {
			if replica.info.ReadOnly {
				fmt.Fprintf(writer, "skipping readonly volume %d on %s\n", replica.info.Id, replica.location.dataNode.Id)
			} else {
				writableReplicas = append(writableReplicas, replica)
			}
		}
		slices.SortFunc(writableReplicas, func(a, b *VolumeReplica) int {
			return int(b.info.FileCount - a.info.FileCount)
		})
		for len(writableReplicas) >= 2 {
			a, b := writableReplicas[0], writableReplicas[1]
			if !*slowMode && c.shouldSkipVolume(a, b, pulseTimeAtSecond, *syncDeletions, *verbose) {
				// the pair already matches; drop b and keep a as the pivot
				writableReplicas = append(writableReplicas[:1], writableReplicas[2:]...)
				continue
			}
			if err := c.syncTwoReplicas(a, b, *applyChanges, *syncDeletions, *nonRepairThreshold, *verbose); err != nil {
				fmt.Fprintf(writer, "sync volume %d on %s and %s: %v\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, err)
			}
			// always choose the larger volume to be the source
			if a.info.FileCount > b.info.FileCount {
				writableReplicas = append(writableReplicas[:1], writableReplicas[2:]...)
			} else {
				writableReplicas = writableReplicas[1:]
			}
		}
	}
	return nil
}

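// syncTwoReplicas runs checkBoth on a replica pair until neither side reports
// changes, giving up after maxIterations or when an iteration makes no progress.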
func (c *commandVolumeCheckDisk) syncTwoReplicas(a *VolumeReplica, b *VolumeReplica, applyChanges bool, doSyncDeletions bool, nonRepairThreshold float64, verbose bool) (err error) {
	aHasChanges, bHasChanges := true, true
	const maxIterations = 5
	iteration := 0
	for (aHasChanges || bHasChanges) && iteration < maxIterations {
		iteration++
		if verbose {
			fmt.Fprintf(c.writer, "sync iteration %d for volume %d\n", iteration, a.info.Id)
		}
		prevAHasChanges, prevBHasChanges := aHasChanges, bHasChanges
		if aHasChanges, bHasChanges, err = c.checkBoth(a, b, applyChanges, doSyncDeletions, nonRepairThreshold, verbose); err != nil {
			return err
		}
		// Detect if we're stuck in a loop with no progress
		if iteration > 1 && prevAHasChanges == aHasChanges && prevBHasChanges == bHasChanges && (aHasChanges || bHasChanges) {
			fmt.Fprintf(c.writer, "volume %d sync is not making progress between %s and %s after iteration %d, stopping to prevent infinite loop\n",
				a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id, iteration)
			return fmt.Errorf("sync not making progress after %d iterations", iteration)
		}
	}
	if iteration >= maxIterations && (aHasChanges || bHasChanges) {
		fmt.Fprintf(c.writer, "volume %d sync reached maximum iterations (%d) between %s and %s, may need manual intervention\n",
			a.info.Id, maxIterations, a.location.dataNode.Id, b.location.dataNode.Id)
		return fmt.Errorf("reached maximum sync iterations (%d)", maxIterations)
	}
	return nil
}

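// checkBoth loads the .idx needle maps of both replicas and then repairs each
// side with the entries that only exist on the other side.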
func (c *commandVolumeCheckDisk) checkBoth(a *VolumeReplica, b *VolumeReplica, applyChanges bool, doSyncDeletions bool, nonRepairThreshold float64, verbose bool) (aHasChanges bool, bHasChanges bool, err error) {
	aDB, bDB := needle_map.NewMemDb(), needle_map.NewMemDb()
	defer func() {
		aDB.Close()
		bDB.Close()
	}()

	// read index db
	readIndexDbCutoffFrom := uint64(time.Now().UnixNano())
	if err = readIndexDatabase(aDB, a.info.Collection, a.info.Id, pb.NewServerAddressFromDataNode(a.location.dataNode), verbose, c.writer, c.env.option.GrpcDialOption); err != nil {
		return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", a.location.dataNode, a.info.Id, err)
	}
	if err := readIndexDatabase(bDB, b.info.Collection, b.info.Id, pb.NewServerAddressFromDataNode(b.location.dataNode), verbose, c.writer, c.env.option.GrpcDialOption); err != nil {
		return true, true, fmt.Errorf("readIndexDatabase %s volume %d: %v", b.location.dataNode, b.info.Id, err)
	}

	// find and make up the differences
	aHasChanges, err1 := doVolumeCheckDisk(bDB, aDB, b, a, verbose, c.writer, applyChanges, doSyncDeletions, nonRepairThreshold, readIndexDbCutoffFrom, c.env.option.GrpcDialOption)
	bHasChanges, err2 := doVolumeCheckDisk(aDB, bDB, a, b, verbose, c.writer, applyChanges, doSyncDeletions, nonRepairThreshold, readIndexDbCutoffFrom, c.env.option.GrpcDialOption)
	if err1 != nil {
		return aHasChanges, bHasChanges, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", b.location.dataNode.Id, a.location.dataNode.Id, b.info.Id, err1)
	}
	if err2 != nil {
		return aHasChanges, bHasChanges, fmt.Errorf("doVolumeCheckDisk source:%s target:%s volume %d: %v", a.location.dataNode.Id, b.location.dataNode.Id, a.info.Id, err2)
	}
	return aHasChanges, bHasChanges, nil
}

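// doVolumeCheckDisk walks the source (minuend) index and, for every entry
// missing from the target (subtrahend), copies the needle blob from source to
// target; entries deleted on the source but still live on the target are
// deleted on the target when deletion syncing is enabled. Repair is refused
// when the fraction of missing entries exceeds nonRepairThreshold.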
func doVolumeCheckDisk(minuend, subtrahend *needle_map.MemDb, source, target *VolumeReplica, verbose bool, writer io.Writer, applyChanges bool, doSyncDeletions bool, nonRepairThreshold float64, cutoffFromAtNs uint64, grpcDialOption grpc.DialOption) (hasChanges bool, err error) {

	// find missing keys
	// hash join, can be more efficient
	var missingNeedles []needle_map.NeedleValue
	var partiallyDeletedNeedles []needle_map.NeedleValue
	var counter int
	doCutoffOfLastNeedle := true
	minuend.DescendingVisit(func(minuendValue needle_map.NeedleValue) error {
		counter++
		if subtrahendValue, found := subtrahend.Get(minuendValue.Key); !found {
			if minuendValue.Size.IsDeleted() {
				return nil
			}
			if doCutoffOfLastNeedle {
				if needleMeta, err := readNeedleMeta(grpcDialOption, pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, minuendValue); err == nil {
					// needles appended after the cutoff time are not considered missing yet
					if needleMeta.AppendAtNs > cutoffFromAtNs {
						return nil
					}
					doCutoffOfLastNeedle = false
				}
			}
			missingNeedles = append(missingNeedles, minuendValue)
		} else {
			if minuendValue.Size.IsDeleted() && !subtrahendValue.Size.IsDeleted() {
				partiallyDeletedNeedles = append(partiallyDeletedNeedles, minuendValue)
			}
			if doCutoffOfLastNeedle {
				doCutoffOfLastNeedle = false
			}
		}
		return nil
	})

	fmt.Fprintf(writer, "volume %d %s has %d entries, %s missed %d and partially deleted %d entries\n",
		source.info.Id, source.location.dataNode.Id, counter, target.location.dataNode.Id, len(missingNeedles), len(partiallyDeletedNeedles))

	if counter == 0 || (len(missingNeedles) == 0 && len(partiallyDeletedNeedles) == 0) {
		return false, nil
	}

	missingNeedlesFraction := float64(len(missingNeedles)) / float64(counter)
	if missingNeedlesFraction > nonRepairThreshold {
		return false, fmt.Errorf(
			"failed to start repair volume %d, percentage of missing keys is greater than the threshold: %.2f > %.2f",
			source.info.Id, missingNeedlesFraction, nonRepairThreshold)
	}

	for _, needleValue := range missingNeedles {
		needleBlob, err := readSourceNeedleBlob(grpcDialOption, pb.NewServerAddressFromDataNode(source.location.dataNode), source.info.Id, needleValue)
		if err != nil {
			return hasChanges, err
		}
		if !applyChanges {
			continue
		}
		if verbose {
			fmt.Fprintf(writer, "read %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
		}
		hasChanges = true
		if err = writeNeedleBlobToTarget(grpcDialOption, pb.NewServerAddressFromDataNode(target.location.dataNode), source.info.Id, needleValue, needleBlob); err != nil {
			return hasChanges, err
		}
	}

	if doSyncDeletions && applyChanges && len(partiallyDeletedNeedles) > 0 {
		var fidList []string
		for _, needleValue := range partiallyDeletedNeedles {
			fidList = append(fidList, needleValue.Key.FileId(source.info.Id))
			if verbose {
				fmt.Fprintf(writer, "delete %s %s => %s\n", needleValue.Key.FileId(source.info.Id), source.location.dataNode.Id, target.location.dataNode.Id)
			}
		}
		deleteResults, deleteErr := operation.DeleteFileIdsAtOneVolumeServer(
			pb.NewServerAddressFromDataNode(target.location.dataNode),
			grpcDialOption, fidList, false)
		if deleteErr != nil {
			return hasChanges, deleteErr
		}
		for _, deleteResult := range deleteResults {
			if deleteResult.Status == http.StatusAccepted && deleteResult.Size > 0 {
				hasChanges = true
			}
		}
	}
	return hasChanges, nil
}

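// readSourceNeedleBlob reads one needle's raw bytes from the source volume
// server using the ReadNeedleBlob gRPC call.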
func readSourceNeedleBlob(grpcDialOption grpc.DialOption, sourceVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue) (needleBlob []byte, err error) {
	err = operation.WithVolumeServerClient(false, sourceVolumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		resp, err := client.ReadNeedleBlob(context.Background(), &volume_server_pb.ReadNeedleBlobRequest{
			VolumeId: volumeId,
			Offset:   needleValue.Offset.ToActualOffset(),
			Size:     int32(needleValue.Size),
		})
		if err != nil {
			return err
		}
		needleBlob = resp.NeedleBlob
		return nil
	})
	return
}

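// writeNeedleBlobToTarget writes a needle blob to the target volume server
// using the WriteNeedleBlob gRPC call.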
func writeNeedleBlobToTarget(grpcDialOption grpc.DialOption, targetVolumeServer pb.ServerAddress, volumeId uint32, needleValue needle_map.NeedleValue, needleBlob []byte) error {
	return operation.WithVolumeServerClient(false, targetVolumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
		_, err := client.WriteNeedleBlob(context.Background(), &volume_server_pb.WriteNeedleBlobRequest{
			VolumeId:   volumeId,
			NeedleId:   uint64(needleValue.Key),
			Size:       int32(needleValue.Size),
			NeedleBlob: needleBlob,
		})
		return err
	})
}

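// readIndexDatabase copies a volume's .idx file from the volume server into
// memory and loads it into the given in-memory needle map.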
func readIndexDatabase(db *needle_map.MemDb, collection string, volumeId uint32, volumeServer pb.ServerAddress, verbose bool, writer io.Writer, grpcDialOption grpc.DialOption) error {
	var buf bytes.Buffer
	if err := copyVolumeIndexFile(collection, volumeId, volumeServer, &buf, verbose, writer, grpcDialOption); err != nil {
		return err
	}
	if verbose {
		fmt.Fprintf(writer, "load collection %s volume %d index size %d from %s ...\n", collection, volumeId, buf.Len(), volumeServer)
	}
	return db.LoadFilterFromReaderAt(bytes.NewReader(buf.Bytes()), true, false)
}

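// copyVolumeIndexFile streams the volume's .idx file from the volume server
// into buf via the CopyFile gRPC call.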
func copyVolumeIndexFile(collection string, volumeId uint32, volumeServer pb.ServerAddress, buf *bytes.Buffer, verbose bool, writer io.Writer, grpcDialOption grpc.DialOption) error {
	return operation.WithVolumeServerClient(true, volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
		ext := ".idx"
		copyFileClient, err := volumeServerClient.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{
			VolumeId:                 volumeId,
			Ext:                      ext,
			CompactionRevision:       math.MaxUint32,
			StopOffset:               math.MaxInt64,
			Collection:               collection,
			IsEcVolume:               false,
			IgnoreSourceFileNotFound: false,
		})
		if err != nil {
			return fmt.Errorf("failed to start copying volume %d%s: %v", volumeId, ext, err)
		}
		err = writeToBuffer(copyFileClient, buf)
		if err != nil {
			return fmt.Errorf("failed to copy %d%s from %s: %v", volumeId, ext, volumeServer, err)
		}
		return nil
	})
}

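// writeToBuffer drains a CopyFile stream into buf until EOF.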
func writeToBuffer(client volume_server_pb.VolumeServer_CopyFileClient, buf *bytes.Buffer) error {
	for {
		resp, receiveErr := client.Recv()
		if receiveErr == io.EOF {
			break
		}
		if receiveErr != nil {
			return fmt.Errorf("receiving: %w", receiveErr)
		}
		buf.Write(resp.FileContent)
	}
	return nil
}