broker_topic_partition_read_write.go 1.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354
  1. package broker
  2. import (
  3. "fmt"
  4. "sync/atomic"
  5. "time"
  6. "github.com/seaweedfs/seaweedfs/weed/glog"
  7. "github.com/seaweedfs/seaweedfs/weed/mq/topic"
  8. "github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
  9. )
// LogBufferStart tracks the starting buffer index for a live log file.
// Buffer indexes are monotonically increasing (and, per the flush path,
// globally unique across restarts), so only the first index needs to be
// recorded: the chunk count is derived as len(chunks).
// Stored in binary format for efficiency.
type LogBufferStart struct {
	StartIndex int64 // index of the first buffer chunk in the file (count = len(chunks))
}
  16. func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, p topic.Partition) log_buffer.LogFlushFuncType {
  17. partitionDir := topic.PartitionDir(t, p)
  18. return func(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte) {
  19. if len(buf) == 0 {
  20. return
  21. }
  22. startTime, stopTime = startTime.UTC(), stopTime.UTC()
  23. targetFile := fmt.Sprintf("%s/%s", partitionDir, startTime.Format(topic.TIME_FORMAT))
  24. // Get buffer index (now globally unique across restarts)
  25. bufferIndex := logBuffer.GetBatchIndex()
  26. for {
  27. if err := b.appendToFileWithBufferIndex(targetFile, buf, bufferIndex); err != nil {
  28. glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
  29. time.Sleep(737 * time.Millisecond)
  30. } else {
  31. break
  32. }
  33. }
  34. atomic.StoreInt64(&logBuffer.LastFlushTsNs, stopTime.UnixNano())
  35. b.accessLock.Lock()
  36. defer b.accessLock.Unlock()
  37. if localPartition := b.localTopicManager.GetLocalPartition(t, p); localPartition != nil {
  38. localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs)
  39. }
  40. glog.V(0).Infof("flushing at %d to %s size %d from buffer %s (index %d)", logBuffer.LastFlushTsNs, targetFile, len(buf), logBuffer.GetName(), bufferIndex)
  41. }
  42. }