transfer.sh/server/storage.go

806 lines
19 KiB
Go
Raw Normal View History

package server
import (
2019-03-19 11:35:30 +01:00
"encoding/json"
2020-03-20 13:29:47 +01:00
"errors"
2014-10-20 14:54:42 +02:00
"fmt"
2019-07-21 16:01:02 +02:00
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
2019-06-23 09:11:54 +02:00
"github.com/aws/aws-sdk-go/service/s3"
2019-07-21 16:01:02 +02:00
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"golang.org/x/net/context"
2018-07-07 20:23:50 +02:00
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
2021-01-05 17:23:47 +01:00
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
2019-11-22 15:14:04 +01:00
"storj.io/common/storj"
2020-02-21 22:42:24 +01:00
"storj.io/uplink"
)
// Storage is the interface every storage backend (local disk, S3,
// Google Drive, Storj) must implement.
type Storage interface {
	// Get retrieves a file from storage
	Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error)
	// Head retrieves content length of a file from storage
	Head(token string, filename string) (contentLength uint64, err error)
	// Put saves a file on storage
	Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error
	// Delete removes a file from storage
	Delete(token string, filename string) error
	// IsNotExist indicates if a file doesn't exist on storage
	IsNotExist(err error) bool
	// Purge cleans up the storage
	Purge(days time.Duration) error
	// Type returns the storage type
	Type() string
}
// LocalStorage is a local storage
type LocalStorage struct {
2014-10-20 14:54:42 +02:00
Storage
basedir string
logger *log.Logger
}
// NewLocalStorage is the factory for LocalStorage; basedir is the root
// directory under which all uploads are stored.
func NewLocalStorage(basedir string, logger *log.Logger) (*LocalStorage, error) {
	storage := &LocalStorage{
		basedir: basedir,
		logger:  logger,
	}
	return storage, nil
}
// Type returns the storage type identifier, "local".
func (s *LocalStorage) Type() string {
	return "local"
}
// Head retrieves the content length of a stored file without opening it.
func (s *LocalStorage) Head(token string, filename string) (contentLength uint64, err error) {
	// Lstat (not Stat) so a symlink reports its own size rather than
	// following the link target.
	fi, err := os.Lstat(filepath.Join(s.basedir, token, filename))
	if err != nil {
		return 0, err
	}
	return uint64(fi.Size()), nil
}
// Get retrieves a file from storage
func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
2014-10-20 14:54:42 +02:00
path := filepath.Join(s.basedir, token, filename)
2014-10-20 14:54:42 +02:00
// content type , content length
if reader, err = os.Open(path); err != nil {
return
}
2014-10-20 14:54:42 +02:00
var fi os.FileInfo
if fi, err = os.Lstat(path); err != nil {
return
2014-10-20 14:54:42 +02:00
}
2014-10-20 14:54:42 +02:00
contentLength = uint64(fi.Size())
2014-10-20 14:54:42 +02:00
return
}
// Delete removes a file from storage
2018-06-24 06:46:57 +02:00
func (s *LocalStorage) Delete(token string, filename string) (err error) {
metadata := filepath.Join(s.basedir, token, fmt.Sprintf("%s.metadata", filename))
2018-07-07 20:23:50 +02:00
os.Remove(metadata)
2018-06-24 06:46:57 +02:00
path := filepath.Join(s.basedir, token, filename)
2018-07-07 20:23:50 +02:00
err = os.Remove(path)
2018-06-24 06:46:57 +02:00
return
}
// Purge cleans up the storage
2021-01-05 17:23:47 +01:00
func (s *LocalStorage) Purge(days time.Duration) (err error) {
err = filepath.Walk(s.basedir,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
2021-01-11 16:02:00 +01:00
if info.ModTime().Before(time.Now().Add(-1 * days)) {
2021-01-05 17:23:47 +01:00
err = os.Remove(path)
return err
}
return nil
})
return
}
// IsNotExist reports whether err means the requested file is absent.
func (s *LocalStorage) IsNotExist(err error) bool {
	return err != nil && os.IsNotExist(err)
}
// Put saves a file on storage
func (s *LocalStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
2014-10-20 14:54:42 +02:00
var f io.WriteCloser
var err error
2014-10-20 14:54:42 +02:00
path := filepath.Join(s.basedir, token)
if err = os.MkdirAll(path, 0700); err != nil && !os.IsExist(err) {
2014-10-20 14:54:42 +02:00
return err
}
2014-10-20 14:54:42 +02:00
if f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600); err != nil {
return err
}
2014-10-20 14:54:42 +02:00
defer f.Close()
2014-10-20 14:54:42 +02:00
if _, err = io.Copy(f, reader); err != nil {
return err
}
2014-10-20 14:54:42 +02:00
return nil
}
// S3Storage is a storage backed by AWS S3
type S3Storage struct {
2014-10-20 14:54:42 +02:00
Storage
2019-06-23 09:11:54 +02:00
bucket string
session *session.Session
s3 *s3.S3
2019-03-19 11:35:30 +01:00
logger *log.Logger
2021-01-05 17:23:47 +01:00
purgeDays time.Duration
2019-03-19 11:35:30 +01:00
noMultipart bool
}
// NewS3Storage is the factory for S3Storage
2021-01-05 17:23:47 +01:00
func NewS3Storage(accessKey, secretKey, bucketName string, purgeDays int, region, endpoint string, disableMultipart bool, forcePathStyle bool, logger *log.Logger) (*S3Storage, error) {
2019-07-21 16:01:02 +02:00
sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)
2021-01-05 17:23:47 +01:00
return &S3Storage{
2021-01-05 17:24:16 +01:00
bucket: bucketName,
s3: s3.New(sess),
session: sess,
logger: logger,
2021-01-05 17:23:47 +01:00
noMultipart: disableMultipart,
2021-01-05 17:24:16 +01:00
purgeDays: time.Duration(purgeDays*24) * time.Hour,
2021-01-05 17:23:47 +01:00
}, nil
}
// Type returns the storage type identifier, "s3".
func (s *S3Storage) Type() string {
	return "s3"
}
// Head retrieves content length of a file from storage
func (s *S3Storage) Head(token string, filename string) (contentLength uint64, err error) {
key := fmt.Sprintf("%s/%s", token, filename)
2019-06-23 09:11:54 +02:00
headRequest := &s3.HeadObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}
// content type , content length
2019-06-23 15:22:49 +02:00
response, err := s.s3.HeadObject(headRequest)
2019-06-24 07:45:47 +02:00
if err != nil {
return
}
2019-06-23 09:11:54 +02:00
if response.ContentLength != nil {
contentLength = uint64(*response.ContentLength)
}
return
}
// Purge cleans up the storage
2021-01-05 17:23:47 +01:00
func (s *S3Storage) Purge(days time.Duration) (err error) {
// NOOP expiration is set at upload time
return nil
}
// IsNotExist indicates if a file doesn't exist on storage
func (s *S3Storage) IsNotExist(err error) bool {
if err == nil {
return false
}
2019-06-23 09:11:54 +02:00
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchKey:
return true
}
}
2019-06-23 09:11:54 +02:00
return false
}
// Get retrieves a file from storage
func (s *S3Storage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
2014-10-20 14:54:42 +02:00
key := fmt.Sprintf("%s/%s", token, filename)
2019-06-23 09:11:54 +02:00
getRequest := &s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}
2019-06-23 15:22:49 +02:00
response, err := s.s3.GetObject(getRequest)
2019-06-24 07:45:47 +02:00
if err != nil {
return
}
2019-06-23 09:11:54 +02:00
if response.ContentLength != nil {
contentLength = uint64(*response.ContentLength)
}
2014-10-20 14:54:42 +02:00
reader = response.Body
return
}
// Delete removes a file from storage
2018-06-24 06:46:57 +02:00
func (s *S3Storage) Delete(token string, filename string) (err error) {
metadata := fmt.Sprintf("%s/%s.metadata", token, filename)
2019-06-23 09:11:54 +02:00
deleteRequest := &s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(metadata),
}
2019-06-23 09:11:54 +02:00
_, err = s.s3.DeleteObject(deleteRequest)
if err != nil {
return
}
2019-06-23 09:11:54 +02:00
key := fmt.Sprintf("%s/%s", token, filename)
deleteRequest = &s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}
2019-06-23 09:11:54 +02:00
_, err = s.s3.DeleteObject(deleteRequest)
return
}
// Put saves a file on storage
func (s *S3Storage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
key := fmt.Sprintf("%s/%s", token, filename)
s.logger.Printf("Uploading file %s to S3 Bucket", filename)
2019-06-23 09:11:54 +02:00
var concurrency int
2019-03-19 21:20:15 +01:00
if !s.noMultipart {
2019-06-23 09:11:54 +02:00
concurrency = 20
2019-03-19 11:35:30 +01:00
} else {
2019-06-23 09:11:54 +02:00
concurrency = 1
2019-03-19 11:35:30 +01:00
}
2019-06-23 09:11:54 +02:00
// Create an uploader with the session and custom options
uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
u.Concurrency = concurrency // default is 5
u.LeavePartsOnError = false
})
var expire *time.Time
if s.purgeDays.Hours() > 0 {
expire = aws.Time(time.Now().Add(s.purgeDays))
}
2019-06-23 09:11:54 +02:00
_, err = uploader.Upload(&s3manager.UploadInput{
2021-01-05 17:24:16 +01:00
Bucket: aws.String(s.bucket),
Key: aws.String(key),
Body: reader,
Expires: expire,
2019-06-23 09:11:54 +02:00
})
return
}
// GDrive is a storage backed by GDrive
type GDrive struct {
2018-07-07 20:23:50 +02:00
service *drive.Service
rootID string
2018-07-07 20:23:50 +02:00
basedir string
localConfigPath string
2019-03-18 20:52:38 +01:00
chunkSize int
logger *log.Logger
}
// NewGDriveStorage is the factory for GDrive
func NewGDriveStorage(clientJSONFilepath string, localConfigPath string, basedir string, chunkSize int, logger *log.Logger) (*GDrive, error) {
b, err := ioutil.ReadFile(clientJSONFilepath)
if err != nil {
return nil, err
}
// If modifying these scopes, delete your previously saved client_secret.json.
config, err := google.ConfigFromJSON(b, drive.DriveScope, drive.DriveMetadataScope)
if err != nil {
return nil, err
}
srv, err := drive.New(getGDriveClient(config, localConfigPath, logger))
if err != nil {
return nil, err
}
2019-03-18 20:52:38 +01:00
chunkSize = chunkSize * 1024 * 1024
storage := &GDrive{service: srv, basedir: basedir, rootID: "", localConfigPath: localConfigPath, chunkSize: chunkSize, logger: logger}
err = storage.setupRoot()
if err != nil {
return nil, err
}
return storage, nil
}
// gdriveRootConfigFile is the local file caching the Drive ID of the root folder.
const gdriveRootConfigFile = "root_id.conf"
// gdriveTokenJSONFile is the local file caching the OAuth2 token.
const gdriveTokenJSONFile = "token.json"
// gdriveDirectoryMimeType is the MIME type Google Drive assigns to folders.
const gdriveDirectoryMimeType = "application/vnd.google-apps.folder"
func (s *GDrive) setupRoot() error {
rootFileConfig := filepath.Join(s.localConfigPath, gdriveRootConfigFile)
rootID, err := ioutil.ReadFile(rootFileConfig)
if err != nil && !os.IsNotExist(err) {
return err
}
if string(rootID) != "" {
s.rootID = string(rootID)
return nil
}
dir := &drive.File{
2018-07-07 20:23:50 +02:00
Name: s.basedir,
MimeType: gdriveDirectoryMimeType,
}
di, err := s.service.Files.Create(dir).Fields("id").Do()
if err != nil {
return err
}
s.rootID = di.Id
err = ioutil.WriteFile(rootFileConfig, []byte(s.rootID), os.FileMode(0600))
if err != nil {
return err
}
return nil
}
// hasChecksum reports whether Drive has an MD5 checksum for f, which is
// how real file content is distinguished here.
func (s *GDrive) hasChecksum(f *drive.File) bool {
	return f.Md5Checksum != ""
}
2018-07-07 20:23:50 +02:00
func (s *GDrive) list(nextPageToken string, q string) (*drive.FileList, error) {
return s.service.Files.List().Fields("nextPageToken, files(id, name, mimeType)").Q(q).PageToken(nextPageToken).Do()
}
func (s *GDrive) findID(filename string, token string) (string, error) {
filename = strings.Replace(filename, `'`, `\'`, -1)
filename = strings.Replace(filename, `"`, `\"`, -1)
fileID, tokenID, nextPageToken := "", "", ""
q := fmt.Sprintf("'%s' in parents and name='%s' and mimeType='%s' and trashed=false", s.rootID, token, gdriveDirectoryMimeType)
l, err := s.list(nextPageToken, q)
2019-03-18 19:09:22 +01:00
if err != nil {
return "", err
}
2019-03-18 19:09:22 +01:00
for 0 < len(l.Files) {
for _, fi := range l.Files {
tokenID = fi.Id
break
}
if l.NextPageToken == "" {
break
}
l, err = s.list(l.NextPageToken, q)
if err != nil {
return "", err
}
}
if filename == "" {
return tokenID, nil
} else if tokenID == "" {
return "", fmt.Errorf("Cannot find file %s/%s", token, filename)
}
q = fmt.Sprintf("'%s' in parents and name='%s' and mimeType!='%s' and trashed=false", tokenID, filename, gdriveDirectoryMimeType)
l, err = s.list(nextPageToken, q)
2019-03-18 19:09:22 +01:00
if err != nil {
return "", err
}
for 0 < len(l.Files) {
for _, fi := range l.Files {
fileID = fi.Id
break
}
if l.NextPageToken == "" {
break
}
l, err = s.list(l.NextPageToken, q)
if err != nil {
return "", err
}
}
if fileID == "" {
return "", fmt.Errorf("Cannot find file %s/%s", token, filename)
}
return fileID, nil
}
// Type returns the storage type identifier, "gdrive".
func (s *GDrive) Type() string {
	return "gdrive"
}
// Head retrieves the content length of a stored file by asking Drive
// for the file's size field only.
func (s *GDrive) Head(token string, filename string) (contentLength uint64, err error) {
	fileID, err := s.findID(filename, token)
	if err != nil {
		return 0, err
	}

	fi, err := s.service.Files.Get(fileID).Fields("size").Do()
	if err != nil {
		return 0, err
	}
	return uint64(fi.Size), nil
}
// Get retrieves a file from storage. The returned reader is the HTTP
// download body and must be closed by the caller.
func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
	var fileID string
	fileID, err = s.findID(filename, token)
	if err != nil {
		return
	}

	var fi *drive.File
	fi, err = s.service.Files.Get(fileID).Fields("size", "md5Checksum").Do()
	if err != nil {
		// Previously this error was ignored; a nil fi would then make
		// hasChecksum panic with a nil pointer dereference.
		return
	}
	// No checksum means Drive has no real content for this entry.
	if !s.hasChecksum(fi) {
		err = fmt.Errorf("Cannot find file %s/%s", token, filename)
		return
	}

	contentLength = uint64(fi.Size)

	ctx := context.Background()

	var res *http.Response
	res, err = s.service.Files.Get(fileID).Context(ctx).Download()
	if err != nil {
		return
	}

	reader = res.Body

	return
}
// Delete removes a file from storage
2018-06-24 06:46:57 +02:00
func (s *GDrive) Delete(token string, filename string) (err error) {
metadata, _ := s.findID(fmt.Sprintf("%s.metadata", filename), token)
2018-06-24 06:46:57 +02:00
s.service.Files.Delete(metadata).Do()
var fileID string
fileID, err = s.findID(filename, token)
2018-06-24 06:46:57 +02:00
if err != nil {
return
}
err = s.service.Files.Delete(fileID).Do()
2018-06-24 06:46:57 +02:00
return
}
// Purge cleans up the storage
2021-01-05 17:23:47 +01:00
func (s *GDrive) Purge(days time.Duration) (err error) {
nextPageToken := ""
expirationDate := time.Now().Add(-1 * days).Format(time.RFC3339)
q := fmt.Sprintf("'%s' in parents and modifiedTime < '%s' and mimeType!='%s' and trashed=false", s.rootID, expirationDate, gdriveDirectoryMimeType)
2021-01-05 17:23:47 +01:00
l, err := s.list(nextPageToken, q)
if err != nil {
return err
}
for 0 < len(l.Files) {
for _, fi := range l.Files {
err = s.service.Files.Delete(fi.Id).Do()
if err != nil {
return
}
}
if l.NextPageToken == "" {
break
}
l, err = s.list(l.NextPageToken, q)
if err != nil {
return
}
2021-01-05 17:23:47 +01:00
}
return
}
// IsNotExist reports whether err is a Drive API 404 response.
func (s *GDrive) IsNotExist(err error) bool {
	if err == nil {
		return false
	}
	e, ok := err.(*googleapi.Error)
	return ok && e.Code == http.StatusNotFound
}
// Put saves a file on storage
func (s *GDrive) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
dirID, err := s.findID("", token)
if err != nil {
return err
}
if dirID == "" {
dir := &drive.File{
2018-07-07 20:23:50 +02:00
Name: token,
Parents: []string{s.rootID},
MimeType: gdriveDirectoryMimeType,
}
di, err := s.service.Files.Create(dir).Fields("id").Do()
if err != nil {
return err
}
dirID = di.Id
}
// Instantiate empty drive file
dst := &drive.File{
2018-07-07 20:23:50 +02:00
Name: filename,
Parents: []string{dirID},
MimeType: contentType,
}
ctx := context.Background()
2019-03-18 20:52:38 +01:00
_, err = s.service.Files.Create(dst).Context(ctx).Media(reader, googleapi.ChunkSize(s.chunkSize)).Do()
if err != nil {
return err
}
return nil
}
// getGDriveClient returns an authenticated HTTP client, reusing the
// cached OAuth token when possible and otherwise running the
// interactive flow and caching the result.
func getGDriveClient(config *oauth2.Config, localConfigPath string, logger *log.Logger) *http.Client {
	tokenFile := filepath.Join(localConfigPath, gdriveTokenJSONFile)

	tok, err := gDriveTokenFromFile(tokenFile)
	if err != nil {
		// No usable cached token: ask the user, then cache it.
		tok = getGDriveTokenFromWeb(config, logger)
		saveGDriveToken(tokenFile, tok, logger)
	}

	return config.Client(context.Background(), tok)
}
// getGDriveTokenFromWeb runs the interactive OAuth flow: it prints the
// authorization URL, reads the code from stdin and exchanges it for a
// token. Any failure is fatal.
func getGDriveTokenFromWeb(config *oauth2.Config, logger *log.Logger) *oauth2.Token {
	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Printf("Go to the following link in your browser then type the "+
		"authorization code: \n%v\n", authURL)

	var authCode string
	if _, err := fmt.Scan(&authCode); err != nil {
		logger.Fatalf("Unable to read authorization code %v", err)
	}

	tok, err := config.Exchange(context.TODO(), authCode)
	if err != nil {
		logger.Fatalf("Unable to retrieve token from web %v", err)
	}

	return tok
}
// gDriveTokenFromFile retrieves a cached OAuth2 token from a local
// JSON file.
func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
	f, err := os.Open(file)
	if err != nil {
		// Check the error before deferring Close: the original deferred
		// Close on a possibly-nil *os.File.
		return nil, err
	}
	defer f.Close()

	tok := &oauth2.Token{}
	err = json.NewDecoder(f).Decode(tok)
	return tok, err
}
// saveGDriveToken caches an OAuth2 token as JSON at the given path.
// Failure to create the file is fatal; an encoding failure is logged.
func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
	logger.Printf("Saving credential file to: %s\n", path)

	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		// Check the error before deferring Close (the original deferred
		// Close before the check, on a possibly-nil file).
		logger.Fatalf("Unable to cache oauth token: %v", err)
	}
	defer f.Close()

	// Best effort: the encode error was previously swallowed silently.
	if err := json.NewEncoder(f).Encode(token); err != nil {
		logger.Printf("Unable to encode oauth token: %v", err)
	}
}
// StorjStorage is a storage backed by Storj
type StorjStorage struct {
Storage
2021-01-05 17:24:16 +01:00
project *uplink.Project
bucket *uplink.Bucket
2021-01-05 17:23:47 +01:00
purgeDays time.Duration
2021-01-05 17:24:16 +01:00
logger *log.Logger
}
// NewStorjStorage is the factory for StorjStorage
2021-01-05 17:23:47 +01:00
func NewStorjStorage(access, bucket string, purgeDays int, logger *log.Logger) (*StorjStorage, error) {
var instance StorjStorage
var err error
2019-08-29 16:33:37 +02:00
ctx := context.TODO()
2020-02-21 22:42:24 +01:00
parsedAccess, err := uplink.ParseAccess(access)
if err != nil {
return nil, err
}
2020-02-21 22:42:24 +01:00
instance.project, err = uplink.OpenProject(ctx, parsedAccess)
2019-08-29 16:33:37 +02:00
if err != nil {
return nil, err
2019-08-29 16:33:37 +02:00
}
2020-02-21 22:42:24 +01:00
instance.bucket, err = instance.project.EnsureBucket(ctx, bucket)
if err != nil {
2020-02-21 23:39:12 +01:00
//Ignoring the error to return the one that occurred first, but try to clean up.
_ = instance.project.Close()
return nil, err
}
2021-01-05 17:24:16 +01:00
instance.purgeDays = time.Duration(purgeDays*24) * time.Hour
2021-01-05 17:23:47 +01:00
instance.logger = logger
return &instance, nil
}
// Type returns the storage type identifier, "storj".
func (s *StorjStorage) Type() string {
	return "storj"
}
// Head retrieves content length of a file from storage
func (s *StorjStorage) Head(token string, filename string) (contentLength uint64, err error) {
key := storj.JoinPaths(token, filename)
ctx := context.TODO()
2020-02-21 22:42:24 +01:00
obj, err := s.project.StatObject(ctx, s.bucket.Name, key)
if err != nil {
return 0, err
}
2020-03-06 15:37:52 +01:00
contentLength = uint64(obj.System.ContentLength)
return
}
// Get retrieves a file from storage
func (s *StorjStorage) Get(token string, filename string) (reader io.ReadCloser, contentLength uint64, err error) {
key := storj.JoinPaths(token, filename)
s.logger.Printf("Getting file %s from Storj Bucket", filename)
ctx := context.TODO()
2020-02-21 22:42:24 +01:00
download, err := s.project.DownloadObject(ctx, s.bucket.Name, key, nil)
if err != nil {
return nil, 0, err
}
2020-02-21 23:39:12 +01:00
2020-03-06 15:37:52 +01:00
contentLength = uint64(download.Info().System.ContentLength)
2020-02-21 23:39:12 +01:00
2020-02-21 22:42:24 +01:00
reader = download
return
}
// Delete removes a file from storage
func (s *StorjStorage) Delete(token string, filename string) (err error) {
key := storj.JoinPaths(token, filename)
s.logger.Printf("Deleting file %s from Storj Bucket", filename)
ctx := context.TODO()
2020-02-21 22:42:24 +01:00
_, err = s.project.DeleteObject(ctx, s.bucket.Name, key)
return
}
// Purge cleans up the storage
2021-01-05 17:23:47 +01:00
func (s *StorjStorage) Purge(days time.Duration) (err error) {
// NOOP expiration is set at upload time
return nil
}
// Put saves a file on storage
func (s *StorjStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
key := storj.JoinPaths(token, filename)
s.logger.Printf("Uploading file %s to Storj Bucket", filename)
ctx := context.TODO()
var uploadOptions *uplink.UploadOptions
if s.purgeDays.Hours() > 0 {
uploadOptions = &uplink.UploadOptions{Expires: time.Now().Add(s.purgeDays)}
}
writer, err := s.project.UploadObject(ctx, s.bucket.Name, key, uploadOptions)
if err != nil {
return err
}
2020-02-21 22:48:09 +01:00
2020-03-06 20:36:46 +01:00
n, err := io.Copy(writer, reader)
if err != nil || uint64(n) != contentLength {
2020-02-21 23:39:12 +01:00
//Ignoring the error to return the one that occurred first, but try to clean up.
_ = writer.Abort()
2020-02-21 22:42:24 +01:00
return err
}
2020-03-06 20:36:46 +01:00
err = writer.SetCustomMetadata(ctx, uplink.CustomMetadata{"content-type": contentType})
2020-02-21 22:48:09 +01:00
if err != nil {
2020-02-21 23:39:12 +01:00
//Ignoring the error to return the one that occurred first, but try to clean up.
_ = writer.Abort()
2020-02-21 22:48:09 +01:00
return err
}
2020-02-21 23:39:12 +01:00
2020-02-21 22:48:09 +01:00
err = writer.Commit()
2020-02-21 22:42:24 +01:00
return err
}
// IsNotExist indicates if a file doesn't exist on storage
func (s *StorjStorage) IsNotExist(err error) bool {
2020-03-20 13:29:47 +01:00
return errors.Is(err, uplink.ErrObjectNotFound)
}