package mapping

import (
	"context"
	"encoding/json"
	"io"
	"time"

	std_errors "errors"

	"cloud.google.com/go/storage"
	"github.com/davecgh/go-spew/spew"
	"github.com/pierrre/compare"
	"github.com/pkg/errors"
)

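// Writer persists event mappings to a storage bucket. writerBucket opens a
// bucket object for writing, and readerLoad loads an already stored mapping
// so Upload can detect conflicts before overwriting.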
type Writer struct {
	writerBucket func(ctx context.Context, fileName string) io.WriteCloser
	readerLoad   func(ctx context.Context, eventFamily string, version string, environment string) (*Mapping, error)
}

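// NewWriter builds a Writer backed by a GCS bucket. The underlying
// *storage.Client is returned so the caller can close it when done.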
func NewWriter(gcreds *GcloudCreds, bucketName string) (*Writer, *storage.Client, error) {
	gb, gbClient, err := NewGCSBucketGetter(gcreds, bucketName)
	if err != nil {
		return nil, nil, errors.Wrap(err, "new gcs bucket")
	}

	return &Writer{
		writerBucket: gb.GetStorageWriter,
		readerLoad:   newReaderFromGCSClient(gb.GetStorageReader).Load,
	}, gbClient, nil
}

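// NewWriterFromGCSClient builds a Writer from caller-supplied storage writer
// and reader functions.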
func NewWriterFromGCSClient(gbSW func(ctx context.Context, fileName string) io.WriteCloser, gbSL func(ctx context.Context, fileName string) (io.ReadCloser, error)) (*Writer, error) {
	return &Writer{
		writerBucket: gbSW,
		readerLoad:   newReaderFromGCSClient(gbSL).Load,
	}, nil
}

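// Upload encodes writeMapping as JSON and writes it to the bucket under the
// file name derived from eventFamily, version and environment. Unless
// forceUpload is set, an existing mapping is never overwritten; instead an
// error describing the difference between the two mappings is returned.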
func (w *Writer) Upload(ctx context.Context, eventFamily, version, environment string, writeMapping *Mapping, forceUpload bool) error {
	ctx, cancel := context.WithTimeout(ctx, 50*time.Second)
	defer cancel()
	filename := getMappingFilename(eventFamily, version, environment)
	// If forceUpload is false, check for an existing mapping and refuse to overwrite it.
	if !forceUpload {
		readMapping, err := w.readerLoad(ctx, eventFamily, version, environment)
		if err != nil && UnwrapAll(err) != storage.ErrObjectNotExist {
			return errors.Wrap(err, "get storage reader")
		}
		if readMapping != nil {
			diff := compare.Compare(readMapping, writeMapping)
			return errors.Errorf("mapping already exists:\nread:\n%s\nwrite:\n%s\ndiff:\n%+v", spew.Sdump(readMapping), spew.Sdump(writeMapping), diff)
		}
	}

	// Reached only when forceUpload is true or the object does not exist yet.
	writer := w.writerBucket(ctx, filename)
	encoder := json.NewEncoder(writer)
	err := encoder.Encode(writeMapping)
	if err != nil {
		return errors.Wrap(err, "encode mapping")
	}
	err = writer.Close()
	if err != nil {
		return errors.Wrap(err, "close uploaded gcp file")
	}
	return nil
}

// Unwrap calls std_errors.Unwrap.
func Unwrap(err error) error {
	return std_errors.Unwrap(err)
}

// UnwrapAll repeatedly unwraps err and returns the innermost error.
func UnwrapAll(err error) error {
	for {
		werr := Unwrap(err)
		if werr == nil {
			return err
		}
		err = werr
	}
}
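
// The function below is an illustrative usage sketch, not part of the change
// itself. It assumes GcloudCreds and Mapping are defined elsewhere in this
// package; the bucket name and the eventFamily/version/environment values are
// placeholders. It shows the intended call sequence: construct a Writer,
// upload a mapping without overwriting an existing one, and close the
// underlying storage client.
func uploadExample(ctx context.Context, creds *GcloudCreds, m *Mapping) error {
	w, client, err := NewWriter(creds, "my-mapping-bucket")
	if err != nil {
		return errors.Wrap(err, "new writer")
	}
	// Close the GCS client once the upload is done.
	defer client.Close()

	// forceUpload=false: fail with a diff if a mapping already exists.
	return w.Upload(ctx, "checkout", "1", "staging", m, false)
}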