@@ -29,9 +29,6 @@ import (
"sync"
"time"

- "github.com/m3db/m3/src/dbnode/encoding"
- "github.com/m3db/m3/src/dbnode/encoding/tile"
- "github.com/m3db/m3/src/dbnode/generated/proto/annotation"
"github.com/m3db/m3/src/dbnode/generated/proto/pagetoken"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -47,7 +44,6 @@ import (
"github.com/m3db/m3/src/dbnode/storage/series/lookup"
"github.com/m3db/m3/src/dbnode/tracepoint"
"github.com/m3db/m3/src/dbnode/ts"
- "github.com/m3db/m3/src/dbnode/ts/downsample"
"github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/m3ninx/doc"
@@ -72,18 +68,16 @@ const (
)

var (
- errShardEntryNotFound = errors.New("shard entry not found")
- errShardNotOpen = errors.New("shard is not open")
- errShardAlreadyTicking = errors.New("shard is already ticking")
- errShardClosingTickTerminated = errors.New("shard is closing, terminating tick")
- errShardInvalidPageToken = errors.New("shard could not unmarshal page token")
- errNewShardEntryTagsTypeInvalid = errors.New("new shard entry options error: tags type invalid")
- errNewShardEntryTagsIterNotAtIndexZero = errors.New("new shard entry options error: tags iter not at index zero")
- errShardIsNotBootstrapped = errors.New("shard is not bootstrapped")
- errShardAlreadyBootstrapped = errors.New("shard is already bootstrapped")
- errFlushStateIsNotInitialized = errors.New("shard flush state is not initialized")
- errFlushStateAlreadyInitialized = errors.New("shard flush state is already initialized")
- errTriedToLoadNilSeries = errors.New("tried to load nil series into shard")
+ errShardEntryNotFound = errors.New("shard entry not found")
+ errShardNotOpen = errors.New("shard is not open")
+ errShardAlreadyTicking = errors.New("shard is already ticking")
+ errShardClosingTickTerminated = errors.New("shard is closing, terminating tick")
+ errShardInvalidPageToken = errors.New("shard could not unmarshal page token")
+ errNewShardEntryTagsTypeInvalid = errors.New("new shard entry options error: tags type invalid")
+ errShardIsNotBootstrapped = errors.New("shard is not bootstrapped")
+ errShardAlreadyBootstrapped = errors.New("shard is already bootstrapped")
+ errFlushStateIsNotInitialized = errors.New("shard flush state is not initialized")
+ errTriedToLoadNilSeries = errors.New("tried to load nil series into shard")

// ErrDatabaseLoadLimitHit is the error returned when the database load limit
// is hit or exceeded.
@@ -188,6 +182,7 @@ type dbShard struct {
currRuntimeOptions dbShardRuntimeOptions
logger *zap.Logger
metrics dbShardMetrics
+ tileAggregator TileAggregator
ticking bool
shard uint32
coldWritesEnabled bool
@@ -328,6 +323,7 @@ func newDatabaseShard(
coldWritesEnabled: namespaceMetadata.Options().ColdWritesEnabled(),
logger: opts.InstrumentOptions().Logger(),
metrics: newDatabaseShardMetrics(shard, scope),
+ tileAggregator: opts.TileAggregator(),
}
s.insertQueue = newDatabaseShardInsertQueue(s.insertSeriesBatch,
s.nowFn, scope, opts.InstrumentOptions().Logger())
@@ -2663,21 +2659,26 @@ func (s *dbShard) Repair(

func (s *dbShard) AggregateTiles(
sourceNsID ident.ID,
- sourceShardID uint32,
+ targetNs Namespace,
+ shardID uint32,
blockReaders []fs.DataFileSetReader,
writer fs.StreamingWriter,
sourceBlockVolumes []shardBlockVolume,
opts AggregateTilesOptions,
- targetSchemaDescr namespace.SchemaDescr,
) (int64, error) {
if len(blockReaders) != len(sourceBlockVolumes) {
- return 0, fmt.Errorf("blockReaders and sourceBlockVolumes length mismatch (%d != %d)", len(blockReaders), len(sourceBlockVolumes))
+ return 0, fmt.Errorf(
+ "blockReaders and sourceBlockVolumes length mismatch (%d != %d)",
+ len(blockReaders),
+ len(sourceBlockVolumes))
}

openBlockReaders := make([]fs.DataFileSetReader, 0, len(blockReaders))
defer func() {
for _, reader := range openBlockReaders {
- reader.Close()
+ if err := reader.Close(); err != nil {
+ s.logger.Error("could not close DataFileSetReader", zap.Error(err))
+ }
}
}()

@@ -2687,7 +2688,7 @@ func (s *dbShard) AggregateTiles(
openOpts := fs.DataReaderOpenOptions{
Identifier: fs.FileSetFileIdentifier{
Namespace: sourceNsID,
- Shard: sourceShardID,
+ Shard: shardID,
BlockStart: sourceBlockVolume.blockStart,
VolumeIndex: sourceBlockVolume.latestVolume,
},
@@ -2706,46 +2707,15 @@ func (s *dbShard) AggregateTiles(
zap.Int("volumeIndex", sourceBlockVolume.latestVolume))
return 0, err
}
- if blockReader.Entries() > maxEntries {
- maxEntries = blockReader.Entries()
+
+ entries := blockReader.Entries()
+ if entries > maxEntries {
+ maxEntries = entries
}

openBlockReaders = append(openBlockReaders, blockReader)
}

- crossBlockReader, err := fs.NewCrossBlockReader(openBlockReaders, s.opts.InstrumentOptions())
- if err != nil {
- s.logger.Error("NewCrossBlockReader", zap.Error(err))
- return 0, err
- }
- defer crossBlockReader.Close()
-
- tileOpts := tile.Options{
- FrameSize: opts.Step,
- Start: xtime.ToUnixNano(opts.Start),
- ReaderIteratorPool: s.opts.ReaderIteratorPool(),
- }
-
- readerIter, err := tile.NewSeriesBlockIterator(crossBlockReader, tileOpts)
- if err != nil {
- s.logger.Error("error when creating new series block iterator", zap.Error(err))
- return 0, err
- }
-
- closed := false
- defer func() {
- if !closed {
- if err := readerIter.Close(); err != nil {
- // NB: log the error on ungraceful exit.
- s.logger.Error("could not close read iterator on error", zap.Error(err))
- }
- }
- }()
-
- encoder := s.opts.EncoderPool().Get()
- defer encoder.Close()
- encoder.Reset(opts.Start, 0, targetSchemaDescr)
-
latestTargetVolume, err := s.LatestVolume(opts.Start)
if err != nil {
return 0, err
@@ -2764,54 +2734,12 @@ func (s *dbShard) AggregateTiles(
return 0, err
}

- var (
- annotationPayload annotation.Payload
- // NB: there is a maximum of 4 datapoints per frame for counters.
- downsampledValues = make([]downsample.Value, 0, 4)
- processedTileCount int64
- segmentCapacity int
- writerData = make([][]byte, 2)
- multiErr xerrors.MultiError
- )
-
- for readerIter.Next() {
- seriesIter, id, encodedTags := readerIter.Current()
-
- seriesTileCount, err := encodeAggregatedSeries(seriesIter, annotationPayload, downsampledValues, encoder)
- if err != nil {
- s.metrics.largeTilesWriteErrors.Inc(1)
- multiErr = multiErr.Add(err)
- break
- }
-
- if seriesTileCount == 0 {
- break
- }
-
- processedTileCount += seriesTileCount
- segment := encoder.DiscardReset(opts.Start, segmentCapacity, targetSchemaDescr)
-
- segmentLen := segment.Len()
- if segmentLen > segmentCapacity {
- // Will use the same capacity for the next series.
- segmentCapacity = segmentLen
- }
-
- writerData[0] = segment.Head.Bytes()
- writerData[1] = segment.Tail.Bytes()
- checksum := segment.CalculateChecksum()
-
- if err := writer.WriteAll(id, encodedTags, writerData, checksum); err != nil {
- s.metrics.largeTilesWriteErrors.Inc(1)
- multiErr = multiErr.Add(err)
- } else {
- s.metrics.largeTilesWrites.Inc(1)
- }
-
- segment.Finalize()
- }
+ var multiErr xerrors.MultiError

- if err := readerIter.Err(); err != nil {
+ processedTileCount, err := s.tileAggregator.AggregateTiles(
+ opts, targetNs, s.ID(), openBlockReaders, writer)
+ if err != nil {
+ // NB: cannot return on the error here, must finish writing.
multiErr = multiErr.Add(err)
}

@@ -2833,11 +2761,6 @@ func (s *dbShard) AggregateTiles(
}
}

- closed = true
- if err := readerIter.Close(); err != nil {
- multiErr = multiErr.Add(err)
- }
-
if err := multiErr.FinalError(); err != nil {
return 0, err
}
@@ -2849,102 +2772,6 @@ func (s *dbShard) AggregateTiles(
return processedTileCount, nil
}

- func encodeAggregatedSeries(
- seriesIter tile.SeriesFrameIterator,
- annotationPayload annotation.Payload,
- downsampledValues []downsample.Value,
- encoder encoding.Encoder,
- ) (int64, error) {
- var (
- prevFrameLastValue = math.NaN()
- processedTileCount int64
- handleValueResets bool
- firstUnit xtime.Unit
- firstAnnotation ts.Annotation
- err error
- )
-
- for seriesIter.Next() {
- frame := seriesIter.Current()
-
- frameValues := frame.Values()
- if len(frameValues) == 0 {
- continue
- }
-
- if processedTileCount == 0 {
- firstUnit, err = frame.Units().Value(0)
- if err != nil {
- return 0, err
- }
-
- firstAnnotation, err = frame.Annotations().Value(0)
- if err != nil {
- return 0, err
- }
-
- annotationPayload.Reset()
- if annotationPayload.Unmarshal(firstAnnotation) == nil {
- // NB: unmarshall error might be a result of some historical annotation data
- // which is not compatible with protobuf payload struct. This would generally mean
- // that metrics type is unknown, so we should ignore the error here.
- handleValueResets = annotationPayload.HandleValueResets
- }
- }
-
- downsampledValues = downsampledValues[:0]
- lastIdx := len(frameValues) - 1
-
- if handleValueResets {
- // Last value plus possible few more datapoints to preserve counter semantics.
- downsampledValues = downsample.DownsampleCounterResets(prevFrameLastValue, frameValues, downsampledValues)
- } else {
- // Plain last value per frame.
- downsampledValue := downsample.Value{
- FrameIndex: lastIdx,
- Value: frameValues[lastIdx],
- }
- downsampledValues = append(downsampledValues, downsampledValue)
- }
-
- if err = encodeDownsampledValues(downsampledValues, frame, firstUnit, firstAnnotation, encoder); err != nil {
- return 0, err
- }
-
- prevFrameLastValue = frameValues[lastIdx]
- processedTileCount++
- }
-
- if err := seriesIter.Err(); err != nil {
- return 0, err
- }
-
- return processedTileCount, nil
- }
-
- func encodeDownsampledValues(
- downsampledValues []downsample.Value,
- frame tile.SeriesBlockFrame,
- unit xtime.Unit,
- annotation ts.Annotation,
- encoder encoding.Encoder,
- ) error {
- for _, downsampledValue := range downsampledValues {
- timestamp := frame.Timestamps()[downsampledValue.FrameIndex]
- dp := ts.Datapoint{
- Timestamp: timestamp,
- TimestampNanos: xtime.ToUnixNano(timestamp),
- Value: downsampledValue.Value,
- }
-
- if err := encoder.Encode(dp, unit, annotation); err != nil {
- return err
- }
- }
-
- return nil
- }
-
func (s *dbShard) BootstrapState() BootstrapState {
s.RLock()
bs := s.bootstrapState
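For context, the shard now delegates the per-series tile encoding (previously done inline with encoding, tile, and downsample helpers) to the injected tileAggregator obtained from opts.TileAggregator(). The sketch below is reconstructed from the new call site in this diff, s.tileAggregator.AggregateTiles(opts, targetNs, s.ID(), openBlockReaders, writer), and is an illustrative assumption of the interface shape, not the exact definition elsewhere in the storage package.

// Illustrative sketch only: the aggregator contract implied by the call site above.
// Method set, parameter names, and doc comments are assumptions.
type TileAggregator interface {
	// AggregateTiles reads source series from the open block readers,
	// downsamples them into tiles of size opts.Step starting at opts.Start,
	// and writes the aggregated series for the target namespace through the
	// streaming writer, returning the number of tiles processed.
	AggregateTiles(
		opts AggregateTilesOptions,
		targetNs Namespace,
		shardID uint32,
		readers []fs.DataFileSetReader,
		writer fs.StreamingWriter,
	) (processedTileCount int64, err error)
}

Moving this logic behind an interface is what lets shard.go drop its direct dependencies on the encoding, encoding/tile, generated/proto/annotation, and ts/downsample packages in the import hunks above.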