Skip to content

Commit 865798a

Browse files
committed
more cleanup
1 parent e228f9e commit 865798a

File tree

6 files changed

+138
-84
lines changed

6 files changed

+138
-84
lines changed

monitoring/exporter/stackdriver/data_test.go

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,8 @@ const (
4141
value4 = "value_4"
4242
value5 = "value_5"
4343
value6 = "value_6"
44+
value7 = "value_7"
45+
value8 = "value_8"
4446

4547
metric1name = "metric_1"
4648
metric1desc = "this is metric 1"
@@ -119,14 +121,13 @@ var (
119121
Tags: []tag.Tag{{projectKey, project2}},
120122
Data: &view.SumData{Value: 7},
121123
}
124+
view3row3 = &view.Row{
125+
Tags: []tag.Tag{{projectKey, project1}},
126+
Data: &view.SumData{Value: 8},
127+
}
122128
// This Row does not have valid Data field, so is invalid.
123129
invalidRow = &view.Row{Data: nil}
124130

125-
startTime1 = endTime1.Add(-10 * time.Second)
126-
endTime1 = startTime2.Add(-time.Second)
127-
startTime2 = endTime2.Add(-10 * time.Second)
128-
endTime2 = time.Now()
129-
130131
resource1 = &mrpb.MonitoredResource{
131132
Type: "cloudsql_database",
132133
Labels: map[string]string{
@@ -145,6 +146,23 @@ var (
145146
}
146147
)
147148

149+
// Timestamps. We make sure that all timestamps are strictly increasing.
150+
var (
151+
startTime1, endTime1, startTime2, endTime2 time.Time
152+
startTime3, endTime3, startTime4, endTime4 time.Time
153+
)
154+
155+
func init() {
156+
ts := time.Now()
157+
for _, t := range []*time.Time{
158+
&startTime1, &endTime1, &startTime2, &endTime2,
159+
&startTime3, &endTime3, &startTime4, &endTime4,
160+
} {
161+
*t = ts
162+
ts = ts.Add(time.Second)
163+
}
164+
}
165+
148166
func getKey(name string) tag.Key {
149167
key, err := tag.NewKey(name)
150168
if err != nil {

monitoring/exporter/stackdriver/mock_check_test.go

Lines changed: 58 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ import (
3535
var (
3636
// errStorage records all errors and associated RowData objects reported by exporter.
3737
errStorage []errRowData
38-
// bndlerProjMap records project ID of given bundler.
39-
bndlerProjMap map[*bundler.Bundler]string
38+
// projDataMap is a copy of the exporter's projDataMap used by each test.
39+
projDataMap map[string]*projectData
4040
// projRds saves all RowData objects passed to addToBundler call by project ID. Since a
4141
// value of a map is not addressable, we save the pointer to the slice.
4242
projRds map[string]*[]*RowData
@@ -63,7 +63,7 @@ func init() {
6363
// testDataInit() initializes all data needed for each test. This function must be called at the
6464
// beginning of each test.
6565
func testDataInit() {
66-
bndlerProjMap = map[*bundler.Bundler]string{}
66+
projDataMap = nil
6767
projRds = map[string]*[]*RowData{}
6868
timeSeriesReqs = nil
6969
timeSeriesResults = nil
@@ -88,15 +88,29 @@ func mockCreateTimeSeries(_ *monitoring.MetricClient, _ context.Context, req *mp
8888
return err
8989
}
9090

91-
func mockNewBundler(projectID string, _ interface{}, _ func(interface{})) *bundler.Bundler {
92-
bndler := &bundler.Bundler{}
93-
// Record the bundler's project ID.
94-
bndlerProjMap[bndler] = projectID
95-
return bndler
91+
func mockNewBundler(_ interface{}, _ func(interface{})) *bundler.Bundler {
92+
// We do not return nil but create an empty Bundler object because
93+
// 1. Exporter.newProjectData() is setting fields of Bundler.
94+
// 2. mockAddToBundler needs to get the project ID of the bundler. To do that we need
95+
// a different address for each bundler.
96+
return &bundler.Bundler{}
9697
}
9798

9899
func mockAddToBundler(bndler *bundler.Bundler, item interface{}, _ int) error {
99-
projID := bndlerProjMap[bndler]
100+
// Get the project ID of the bndler by inspecting projDataMap.
101+
var projID string
102+
projIDfound := false
103+
for tempProjID, pd := range projDataMap {
104+
if pd.bndler == bndler {
105+
projID = tempProjID
106+
projIDfound = true
107+
break
108+
}
109+
}
110+
if !projIDfound {
111+
return unrecognizedDataError
112+
}
113+
100114
rds, ok := projRds[projID]
101115
if !ok {
102116
// For new project ID, create the actual slice and save its pointer.
@@ -108,6 +122,41 @@ func mockAddToBundler(bndler *bundler.Bundler, item interface{}, _ int) error {
108122
return nil
109123
}
110124

125+
// newTest*() functions create exporters and project data used for testing. Each test should call
126+
// one of these functions once and only once, and never call NewExporter() directly.
127+
128+
// newTestExp creates an exporter which saves error to errStorage. Caller should not set
129+
// opts.OnError.
130+
func newTestExp(t *testing.T, opts *Options) *Exporter {
131+
opts.OnError = testOnError
132+
exp, err := NewExporter(ctx, opts)
133+
if err != nil {
134+
t.Fatalf("creating exporter failed: %v", err)
135+
}
136+
// Expose projDataMap so that mockAddToBundler() can use it.
137+
projDataMap = exp.projDataMap
138+
return exp
139+
}
140+
141+
// newTestProjData creates a projectData object to test behavior of projectData.uploadRowData. Other
142+
// uses are not recommended. As with newTestExp, all errors are saved to errStorage.
143+
func newTestProjData(t *testing.T, opts *Options) *projectData {
144+
return newTestExp(t, opts).newProjectData(project1)
145+
}
146+
147+
// We define a storage for all errors that happened during export operations.
148+
149+
type errRowData struct {
150+
err error
151+
rds []*RowData
152+
}
153+
154+
// testOnError records any incoming error and accompanying RowData array. This function is passed to
155+
// the exporter to record errors.
156+
func testOnError(err error, rds ...*RowData) {
157+
errStorage = append(errStorage, errRowData{err, rds})
158+
}
159+
111160
// checkMetricClient checks all recorded requests to the metric client. We only compare int64
112161
// values of the time series. To make this work, we assigned different int64 values for all valid
113162
// rows in the test.
@@ -137,19 +186,6 @@ func checkMetricClient(t *testing.T, wantReqsValues [][]int64) {
137186
}
138187
}
139188

140-
// We define a storage for all errors happened in export operation.
141-
142-
type errRowData struct {
143-
err error
144-
rds []*RowData
145-
}
146-
147-
// testOnError records any incoming error and accompanying RowData array. This function is passed to
148-
// the exporter to record errors.
149-
func testOnError(err error, rds ...*RowData) {
150-
errStorage = append(errStorage, errRowData{err, rds})
151-
}
152-
153189
// errRowDataCheck contains data for checking content of error storage.
154190
type errRowDataCheck struct {
155191
errPrefix, errSuffix string
@@ -208,17 +244,6 @@ func checkRowData(rd, wantRd *RowData) error {
208244
return nil
209245
}
210246

211-
// newTestExp creates an exporter which saves error to errStorage. Caller should not set
212-
// opts.OnError.
213-
func newTestExp(t *testing.T, opts *Options) *Exporter {
214-
opts.OnError = testOnError
215-
exp, err := NewExporter(ctx, opts)
216-
if err != nil {
217-
t.Fatalf("creating exporter failed: %v", err)
218-
}
219-
return exp
220-
}
221-
222247
// checkProjData checks all data passed to the bundler by bundler.Add().
223248
func checkProjData(t *testing.T, wantProjData map[string][]*RowData) {
224249
wantProj := map[string]bool{}
@@ -243,12 +268,6 @@ func checkProjData(t *testing.T, wantProjData map[string][]*RowData) {
243268
}
244269
}
245270

246-
// newTestProjData creates a projectData object to test behavior of projectData.uploadRowData. Other
247-
// uses are not recommended. As newTestExp, all errors are saved to errStorage.
248-
func newTestProjData(t *testing.T, opts *Options) *projectData {
249-
return newTestExp(t, opts).newProjectData(project1)
250-
}
251-
252271
// checkLabels checks data in labels.
253272
func checkLabels(t *testing.T, prefix string, labels, wantLabels map[string]string) {
254273
for labelName, value := range labels {

monitoring/exporter/stackdriver/project_data.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ func (e *Exporter) newProjectData(projectID string) *projectData {
4242
projectID: projectID,
4343
}
4444

45-
pd.bndler = newBundler(projectID, (*RowData)(nil), pd.uploadRowData)
45+
pd.bndler = newBundler((*RowData)(nil), pd.uploadRowData)
4646
// Set options for bundler if they are provided by users.
4747
if 0 < e.opts.BundleDelayThreshold {
4848
pd.bndler.DelayThreshold = e.opts.BundleDelayThreshold

monitoring/exporter/stackdriver/row_data_to_point.go

Lines changed: 25 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -17,18 +17,18 @@ package stackdriver
1717
import (
1818
"time"
1919

20-
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
20+
tspb "github.com/golang/protobuf/ptypes/timestamp"
2121
"go.opencensus.io/stats"
2222
"go.opencensus.io/stats/view"
23-
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
24-
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
23+
dspb "google.golang.org/genproto/googleapis/api/distribution"
24+
mpb "google.golang.org/genproto/googleapis/monitoring/v3"
2525
)
2626

2727
// Functions in this file are used to convert RowData to monitoring points that are used by uploading
2828
// RPC calls of monitoring client. All functions in this file are copied from
2929
// contrib.go.opencensus.io/exporter/stackdriver.
3030

31-
func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
31+
func newPoint(v *view.View, row *view.Row, start, end time.Time) *mpb.Point {
3232
switch v.Aggregation.Type {
3333
case view.AggTypeLastValue:
3434
return newGaugePoint(v, row, end)
@@ -37,14 +37,14 @@ func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.P
3737
}
3838
}
3939

40-
func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
41-
return &monitoringpb.Point{
42-
Interval: &monitoringpb.TimeInterval{
43-
StartTime: &timestamppb.Timestamp{
40+
func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *mpb.Point {
41+
return &mpb.Point{
42+
Interval: &mpb.TimeInterval{
43+
StartTime: &tspb.Timestamp{
4444
Seconds: start.Unix(),
4545
Nanos: int32(start.Nanosecond()),
4646
},
47-
EndTime: &timestamppb.Timestamp{
47+
EndTime: &tspb.Timestamp{
4848
Seconds: end.Unix(),
4949
Nanos: int32(end.Nanosecond()),
5050
},
@@ -53,50 +53,50 @@ func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *moni
5353
}
5454
}
5555

56-
func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point {
57-
gaugeTime := &timestamppb.Timestamp{
56+
func newGaugePoint(v *view.View, row *view.Row, end time.Time) *mpb.Point {
57+
gaugeTime := &tspb.Timestamp{
5858
Seconds: end.Unix(),
5959
Nanos: int32(end.Nanosecond()),
6060
}
61-
return &monitoringpb.Point{
62-
Interval: &monitoringpb.TimeInterval{
61+
return &mpb.Point{
62+
Interval: &mpb.TimeInterval{
6363
EndTime: gaugeTime,
6464
},
6565
Value: newTypedValue(v, row),
6666
}
6767
}
6868

69-
func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
69+
func newTypedValue(vd *view.View, r *view.Row) *mpb.TypedValue {
7070
switch v := r.Data.(type) {
7171
case *view.CountData:
72-
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
72+
return &mpb.TypedValue{Value: &mpb.TypedValue_Int64Value{
7373
Int64Value: v.Value,
7474
}}
7575
case *view.SumData:
7676
switch vd.Measure.(type) {
7777
case *stats.Int64Measure:
78-
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
78+
return &mpb.TypedValue{Value: &mpb.TypedValue_Int64Value{
7979
Int64Value: int64(v.Value),
8080
}}
8181
case *stats.Float64Measure:
82-
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
82+
return &mpb.TypedValue{Value: &mpb.TypedValue_DoubleValue{
8383
DoubleValue: v.Value,
8484
}}
8585
}
8686
case *view.DistributionData:
87-
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
88-
DistributionValue: &distributionpb.Distribution{
87+
return &mpb.TypedValue{Value: &mpb.TypedValue_DistributionValue{
88+
DistributionValue: &dspb.Distribution{
8989
Count: v.Count,
9090
Mean: v.Mean,
9191
SumOfSquaredDeviation: v.SumOfSquaredDev,
9292
// TODO(songya): uncomment this once Stackdriver supports min/max.
93-
// Range: &distributionpb.Distribution_Range{
93+
// Range: &dspb.Distribution_Range{
9494
// Min: v.Min,
9595
// Max: v.Max,
9696
// },
97-
BucketOptions: &distributionpb.Distribution_BucketOptions{
98-
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
99-
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
97+
BucketOptions: &dspb.Distribution_BucketOptions{
98+
Options: &dspb.Distribution_BucketOptions_ExplicitBuckets{
99+
ExplicitBuckets: &dspb.Distribution_BucketOptions_Explicit{
100100
Bounds: vd.Aggregation.Buckets,
101101
},
102102
},
@@ -107,11 +107,11 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
107107
case *view.LastValueData:
108108
switch vd.Measure.(type) {
109109
case *stats.Int64Measure:
110-
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
110+
return &mpb.TypedValue{Value: &mpb.TypedValue_Int64Value{
111111
Int64Value: int64(v.Value),
112112
}}
113113
case *stats.Float64Measure:
114-
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
114+
return &mpb.TypedValue{Value: &mpb.TypedValue_DoubleValue{
115115
DoubleValue: v.Value,
116116
}}
117117
}

monitoring/exporter/stackdriver/stackdriver.go

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ type Options struct {
8181
// returned, and in that case the error is reported to callers via OnError and the row data
8282
// will not be uploaded to stackdriver. When GetProjectID is not set, for any row data with
8383
// tag key name "project_id" (it's defined as ProjectKeyName), the value of the tag will be
84-
// it's project ID. All other row data will besilently ignored by RowDataNotApplicableError.
84+
// its project ID. All other row data will be silently ignored.
8585
GetProjectID func(*RowData) (projectID string, err error)
8686
// OnError is used to report any error that happened while exporting view data. Whenever
8787
// this function is called, it's guaranteed that at least one row data is also passed to
@@ -134,12 +134,8 @@ func defaultMakeResource(rd *RowData) (*mrpb.MonitoredResource, error) {
134134
var (
135135
newMetricClient = monitoring.NewMetricClient
136136
createTimeSeries = (*monitoring.MetricClient).CreateTimeSeries
137-
// Although bundler.NewBundler does not require projectID, we pass so that tests can mark
138-
// each bundler to the associated project.
139-
newBundler = func(projectID string, itemExample interface{}, handler func(interface{})) *bundler.Bundler {
140-
return bundler.NewBundler(itemExample, handler)
141-
}
142-
addToBundler = (*bundler.Bundler).Add
137+
newBundler = bundler.NewBundler
138+
addToBundler = (*bundler.Bundler).Add
143139
)
144140

145141
// NewExporter creates an Exporter object. Once a call to NewExporter is made, any fields in opts

0 commit comments

Comments
 (0)