Mirror of https://github.com/getsops/sops.git, synced 2026-02-05 12:45:21 +01:00
Implement sops publish command (#473)
* Implement `sops publish` command

  Publishes a file to a pre-configured destination (this lives in the sops config file). Additionally, support re-encryption rules that work just like the creation rules. Initial support for S3/GCS. This is part of the sops-workspace v2.0 project. Includes the addition of a new dependency: github.com/googleapis/gax-go/v2.

* Code review changes; support the global --verbose flag.

* Switch to recreation_rule with full support

  The re-encryption rule is now the recreation rule and supports everything that a creation rule does. Now, when you load a config for a file, you load either the creation rule or the destination rule. I'm not sure about this style long term, but it allows support for the recreation rules to be added without a bigger refactor of how the config file works.

* Split loadForFileFromBytes into two functions

  Remove the branching on whether the rule is a destination rule or not; create one function for creation rules and one for destination rules.

* Pretty diff for keygroup updates in sops publish.
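The heart of the change is a small publish package with a Destination interface and S3/GCS implementations, shown in the diff below. As a rough sketch of the resulting flow (not part of this commit; the import path, bucket, prefix, and file name are assumptions made purely for illustration), a caller reads the already-encrypted file and hands it to a destination:

package main

import (
	"io/ioutil"
	"log"

	"go.mozilla.org/sops/publish" // assumed import path, for illustration only
)

func main() {
	// In sops these values would come from a destination rule in the
	// config file; they are hard-coded here purely for illustration.
	var dest publish.Destination = publish.NewS3Destination("example-bucket", "published/")

	data, err := ioutil.ReadFile("app.enc.yaml")
	if err != nil {
		log.Fatal(err)
	}
	if err := dest.Upload(data, "app.enc.yaml"); err != nil {
		log.Fatal(err)
	}
	log.Println("published to", dest.Path("app.enc.yaml"))
}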
publish/gcs.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package publish

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

type GCSDestination struct {
	gcsBucket string
	gcsPrefix string
}

func NewGCSDestination(gcsBucket string, gcsPrefix string) *GCSDestination {
	return &GCSDestination{gcsBucket, gcsPrefix}
}

func (gcsd *GCSDestination) Path(fileName string) string {
	return fmt.Sprintf("gcs://%s/%s%s", gcsd.gcsBucket, gcsd.gcsPrefix, fileName)
}

func (gcsd *GCSDestination) Upload(fileContents []byte, fileName string) error {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	wc := client.Bucket(gcsd.gcsBucket).Object(gcsd.gcsPrefix + fileName).NewWriter(ctx)
	defer wc.Close()
	_, err = wc.Write(fileContents)
	if err != nil {
		return err
	}
	return nil
}
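Usage note, not part of the commit: storage.NewClient picks up Google Application Default Credentials from the environment, and the object name is simply the prefix concatenated with the file name. A hypothetical example test (e.g. a publish/gcs_test.go, which this commit does not add) would look like:

package publish

import "fmt"

func ExampleGCSDestination_Path() {
	// Bucket and prefix are hypothetical; Path only formats a string,
	// so no GCS credentials are needed to run this example.
	d := NewGCSDestination("example-bucket", "team/")
	fmt.Println(d.Path("app.enc.yaml"))
	// Output: gcs://example-bucket/team/app.enc.yaml
}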
publish/publish.go (new file, 6 lines)
@@ -0,0 +1,6 @@
package publish

type Destination interface {
	Upload(fileContents []byte, fileName string) error
	Path(fileName string) string
}
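Destination is the seam that lets `sops publish` target different backends. As an illustration only (not part of the commit), any type with these two methods satisfies it; a hypothetical local-directory destination could look like:

package publish

import (
	"io/ioutil"
	"path/filepath"
)

// localDestination is a hypothetical Destination that writes files into a
// local directory; it exists only to illustrate the interface contract.
type localDestination struct {
	dir string
}

func (l *localDestination) Upload(fileContents []byte, fileName string) error {
	return ioutil.WriteFile(filepath.Join(l.dir, fileName), fileContents, 0600)
}

func (l *localDestination) Path(fileName string) string {
	return filepath.Join(l.dir, fileName)
}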
publish/s3.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package publish

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

type S3Destination struct {
	s3Bucket string
	s3Prefix string
}

func NewS3Destination(s3Bucket string, s3Prefix string) *S3Destination {
	return &S3Destination{s3Bucket, s3Prefix}
}

func (s3d *S3Destination) Path(fileName string) string {
	return fmt.Sprintf("s3://%s/%s%s", s3d.s3Bucket, s3d.s3Prefix, fileName)
}

func (s3d *S3Destination) Upload(fileContents []byte, fileName string) error {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)
	input := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(bytes.NewReader(fileContents)),
		Bucket: aws.String(s3d.s3Bucket),
		Key:    aws.String(s3d.s3Prefix + fileName),
	}
	_, err := svc.PutObject(input)
	if err != nil {
		return err
	}
	return nil
}
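Usage note, not part of the commit: session.Must(session.NewSession()) builds the client from the standard AWS credential and region chain (environment variables, shared credentials/config, instance role), so Upload needs those set up, while Path is pure string formatting. A hypothetical example test mirroring the GCS one above:

package publish

import "fmt"

func ExampleS3Destination_Path() {
	// Bucket and prefix are hypothetical; no AWS credentials are needed
	// to run this example because only Path is exercised.
	d := NewS3Destination("example-bucket", "published/")
	fmt.Println(d.Path("app.enc.yaml"))
	// Output: s3://example-bucket/published/app.enc.yaml
}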