2017-03-22 18:09:21 +01:00
|
|
|
package server
|
2014-10-20 13:38:40 +02:00
|
|
|
|
|
|
|
import (
|
2019-03-19 11:35:30 +01:00
|
|
|
"encoding/json"
|
2014-10-20 14:54:42 +02:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2019-03-19 11:35:30 +01:00
|
|
|
"io/ioutil"
|
2014-11-14 23:32:44 +01:00
|
|
|
"log"
|
2014-11-13 21:41:43 +01:00
|
|
|
"mime"
|
2019-03-19 11:35:30 +01:00
|
|
|
"net/http"
|
2014-10-20 14:54:42 +02:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2019-03-19 11:35:30 +01:00
|
|
|
"strings"
|
2016-01-29 16:23:25 +01:00
|
|
|
|
2019-07-21 16:01:02 +02:00
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/session"
|
2019-06-23 09:11:54 +02:00
|
|
|
"github.com/aws/aws-sdk-go/service/s3"
|
2019-07-21 16:01:02 +02:00
|
|
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
2018-06-19 15:30:26 +02:00
|
|
|
"golang.org/x/net/context"
|
2018-07-07 20:23:50 +02:00
|
|
|
"golang.org/x/oauth2"
|
2018-06-19 15:30:26 +02:00
|
|
|
"golang.org/x/oauth2/google"
|
|
|
|
"google.golang.org/api/drive/v3"
|
|
|
|
"google.golang.org/api/googleapi"
|
2019-08-28 15:18:00 +02:00
|
|
|
"storj.io/storj/lib/uplink"
|
|
|
|
"storj.io/storj/pkg/storj"
|
2014-10-20 13:38:40 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// Storage is the common interface implemented by every storage backend
// (local disk, S3, Google Drive, Storj). Files are addressed by an upload
// token plus a filename.
type Storage interface {
	// Get returns a reader for the stored file along with its content type
	// and length. The caller is responsible for closing the reader.
	Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error)
	// Head returns only the content type and length of the stored file,
	// without opening it for reading.
	Head(token string, filename string) (contentType string, contentLength uint64, err error)
	// Put stores the contents of reader under token/filename.
	Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error
	// Delete removes the stored file (and, in the concrete implementations,
	// its ".metadata" companion).
	Delete(token string, filename string) error
	// IsNotExist reports whether err, returned from one of the methods
	// above, means the requested file does not exist.
	IsNotExist(err error) bool
	// Type returns a short identifier for the backend (e.g. "local", "s3").
	Type() string
}
|
|
|
|
|
|
|
|
// LocalStorage implements Storage on the local filesystem. Files are laid
// out as basedir/token/filename.
type LocalStorage struct {
	Storage
	// basedir is the root directory under which all uploads are stored.
	basedir string
	// logger is currently unused by the methods visible here but kept for
	// parity with the other backends.
	logger *log.Logger
}
|
|
|
|
|
2018-10-27 00:15:55 +02:00
|
|
|
func NewLocalStorage(basedir string, logger *log.Logger) (*LocalStorage, error) {
|
|
|
|
return &LocalStorage{basedir: basedir, logger: logger}, nil
|
2014-10-20 13:38:40 +02:00
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
// Type identifies this backend as local-disk storage.
func (s *LocalStorage) Type() string {
	return "local"
}
|
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
func (s *LocalStorage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
|
|
|
|
path := filepath.Join(s.basedir, token, filename)
|
|
|
|
|
|
|
|
var fi os.FileInfo
|
|
|
|
if fi, err = os.Lstat(path); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
contentLength = uint64(fi.Size())
|
|
|
|
|
|
|
|
contentType = mime.TypeByExtension(filepath.Ext(filename))
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
|
|
|
|
path := filepath.Join(s.basedir, token, filename)
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
// content type , content length
|
|
|
|
if reader, err = os.Open(path); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
var fi os.FileInfo
|
|
|
|
if fi, err = os.Lstat(path); err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
return
|
2014-10-20 14:54:42 +02:00
|
|
|
}
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
contentLength = uint64(fi.Size())
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
contentType = mime.TypeByExtension(filepath.Ext(filename))
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
return
|
2014-10-20 13:38:40 +02:00
|
|
|
}
|
|
|
|
|
2018-06-24 06:46:57 +02:00
|
|
|
func (s *LocalStorage) Delete(token string, filename string) (err error) {
|
|
|
|
metadata := filepath.Join(s.basedir, token, fmt.Sprintf("%s.metadata", filename))
|
2018-07-07 20:23:50 +02:00
|
|
|
os.Remove(metadata)
|
2018-06-24 06:46:57 +02:00
|
|
|
|
|
|
|
path := filepath.Join(s.basedir, token, filename)
|
2018-07-07 20:23:50 +02:00
|
|
|
err = os.Remove(path)
|
2018-06-24 06:46:57 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-01-29 16:23:25 +01:00
|
|
|
func (s *LocalStorage) IsNotExist(err error) bool {
|
2017-03-28 16:12:31 +02:00
|
|
|
if err == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-01-29 16:23:25 +01:00
|
|
|
return os.IsNotExist(err)
|
|
|
|
}
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
func (s *LocalStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
|
2014-10-20 14:54:42 +02:00
|
|
|
var f io.WriteCloser
|
|
|
|
var err error
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
path := filepath.Join(s.basedir, token)
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2019-08-23 22:44:35 +02:00
|
|
|
if err = os.MkdirAll(path, 0700); err != nil && !os.IsExist(err) {
|
2014-10-20 14:54:42 +02:00
|
|
|
return err
|
|
|
|
}
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
if f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
defer f.Close()
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
if _, err = io.Copy(f, reader); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
return nil
|
2014-10-20 13:38:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// S3Storage implements Storage on an AWS S3 (or S3-compatible) bucket.
// Objects are keyed as "token/filename".
type S3Storage struct {
	Storage
	// bucket is the name of the target S3 bucket.
	bucket string
	// session is the shared AWS session, also used by the upload manager.
	session *session.Session
	// s3 is the service client used for head/get/delete calls.
	s3 *s3.S3
	logger *log.Logger
	// noMultipart forces single-part uploads (concurrency 1) in Put.
	noMultipart bool
}
|
|
|
|
|
2019-07-21 16:01:02 +02:00
|
|
|
func NewS3Storage(accessKey, secretKey, bucketName, region, endpoint string, logger *log.Logger, disableMultipart bool, forcePathStyle bool) (*S3Storage, error) {
|
|
|
|
sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
return &S3Storage{bucket: bucketName, s3: s3.New(sess), session: sess, logger: logger, noMultipart: disableMultipart}, nil
|
2014-10-20 13:38:40 +02:00
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
// Type identifies this backend as S3 storage.
func (s *S3Storage) Type() string {
	return "s3"
}
|
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
func (s *S3Storage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
headRequest := &s3.HeadObjectInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(key),
|
|
|
|
}
|
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
// content type , content length
|
2019-06-23 15:22:49 +02:00
|
|
|
response, err := s.s3.HeadObject(headRequest)
|
2019-06-24 07:45:47 +02:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2019-06-23 09:11:54 +02:00
|
|
|
|
|
|
|
if response.ContentType != nil {
|
|
|
|
contentType = *response.ContentType
|
2014-11-13 21:41:43 +01:00
|
|
|
}
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
if response.ContentLength != nil {
|
|
|
|
contentLength = uint64(*response.ContentLength)
|
2014-11-13 21:41:43 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-01-29 16:23:25 +01:00
|
|
|
func (s *S3Storage) IsNotExist(err error) bool {
|
2017-03-28 16:12:31 +02:00
|
|
|
if err == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
if aerr, ok := err.(awserr.Error); ok {
|
|
|
|
switch aerr.Code() {
|
|
|
|
case s3.ErrCodeNoSuchKey:
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
2016-01-29 16:23:25 +01:00
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
return false
|
2016-01-29 16:23:25 +01:00
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
func (s *S3Storage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
getRequest := &s3.GetObjectInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(key),
|
2014-11-13 21:41:43 +01:00
|
|
|
}
|
|
|
|
|
2019-06-23 15:22:49 +02:00
|
|
|
response, err := s.s3.GetObject(getRequest)
|
2019-06-24 07:45:47 +02:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2019-06-23 09:11:54 +02:00
|
|
|
|
|
|
|
if response.ContentType != nil {
|
|
|
|
contentType = *response.ContentType
|
|
|
|
}
|
|
|
|
|
|
|
|
if response.ContentLength != nil {
|
|
|
|
contentLength = uint64(*response.ContentLength)
|
2014-11-13 21:41:43 +01:00
|
|
|
}
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
reader = response.Body
|
|
|
|
return
|
2014-10-20 13:38:40 +02:00
|
|
|
}
|
|
|
|
|
2018-06-24 06:46:57 +02:00
|
|
|
func (s *S3Storage) Delete(token string, filename string) (err error) {
|
|
|
|
metadata := fmt.Sprintf("%s/%s.metadata", token, filename)
|
2019-06-23 09:11:54 +02:00
|
|
|
deleteRequest := &s3.DeleteObjectInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(metadata),
|
2014-11-14 23:32:44 +01:00
|
|
|
}
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
_, err = s.s3.DeleteObject(deleteRequest)
|
|
|
|
if err != nil {
|
|
|
|
return
|
2014-11-14 23:32:44 +01:00
|
|
|
}
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
deleteRequest = &s3.DeleteObjectInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(key),
|
2014-11-14 23:32:44 +01:00
|
|
|
}
|
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
_, err = s.s3.DeleteObject(deleteRequest)
|
2014-11-14 23:32:44 +01:00
|
|
|
|
|
|
|
return
|
2014-10-20 13:38:40 +02:00
|
|
|
}
|
2018-06-19 15:30:26 +02:00
|
|
|
|
2019-03-19 11:02:39 +01:00
|
|
|
func (s *S3Storage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
|
|
|
|
s.logger.Printf("Uploading file %s to S3 Bucket", filename)
|
2019-06-23 09:11:54 +02:00
|
|
|
var concurrency int
|
2019-03-19 21:20:15 +01:00
|
|
|
if !s.noMultipart {
|
2019-06-23 09:11:54 +02:00
|
|
|
concurrency = 20
|
2019-03-19 11:35:30 +01:00
|
|
|
} else {
|
2019-06-23 09:11:54 +02:00
|
|
|
concurrency = 1
|
2019-03-19 11:35:30 +01:00
|
|
|
}
|
2019-03-19 11:02:39 +01:00
|
|
|
|
2019-06-23 09:11:54 +02:00
|
|
|
// Create an uploader with the session and custom options
|
|
|
|
uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
|
|
|
|
u.Concurrency = concurrency // default is 5
|
|
|
|
u.LeavePartsOnError = false
|
|
|
|
})
|
|
|
|
|
|
|
|
_, err = uploader.Upload(&s3manager.UploadInput{
|
|
|
|
Bucket: aws.String(s.bucket),
|
|
|
|
Key: aws.String(key),
|
|
|
|
Body: reader,
|
|
|
|
})
|
|
|
|
|
|
|
|
return
|
2019-03-19 11:02:39 +01:00
|
|
|
}
|
|
|
|
|
2018-06-19 15:30:26 +02:00
|
|
|
// GDrive implements Storage on Google Drive. Uploads live in per-token
// folders under a single root folder whose id is cached on local disk.
type GDrive struct {
	// service is the authenticated Drive API client.
	service *drive.Service
	// rootId is the Drive file id of the root upload folder (see setupRoot).
	rootId string
	// basedir is the display name of the root folder in Drive.
	basedir string
	// localConfigPath is where the root id and OAuth token are cached.
	localConfigPath string
	// chunkSize is the upload chunk size in bytes (configured in MB and
	// converted by NewGDriveStorage).
	chunkSize int
	logger *log.Logger
}
|
|
|
|
|
2019-03-18 20:52:38 +01:00
|
|
|
func NewGDriveStorage(clientJsonFilepath string, localConfigPath string, basedir string, chunkSize int, logger *log.Logger) (*GDrive, error) {
|
2018-06-19 15:30:26 +02:00
|
|
|
b, err := ioutil.ReadFile(clientJsonFilepath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If modifying these scopes, delete your previously saved client_secret.json.
|
|
|
|
config, err := google.ConfigFromJSON(b, drive.DriveScope, drive.DriveMetadataScope)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-10-27 00:15:55 +02:00
|
|
|
srv, err := drive.New(getGDriveClient(config, localConfigPath, logger))
|
2018-06-19 15:30:26 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-03-18 20:52:38 +01:00
|
|
|
chunkSize = chunkSize * 1024 * 1024
|
|
|
|
storage := &GDrive{service: srv, basedir: basedir, rootId: "", localConfigPath: localConfigPath, chunkSize: chunkSize, logger: logger}
|
2018-06-19 15:30:26 +02:00
|
|
|
err = storage.setupRoot()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return storage, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GDriveRootConfigFile caches the Drive file id of the root upload folder.
const GDriveRootConfigFile = "root_id.conf"

// GDriveTokenJsonFile caches the OAuth2 token between runs.
const GDriveTokenJsonFile = "token.json"

// GDriveDirectoryMimeType is the MIME type Drive uses for folders.
const GDriveDirectoryMimeType = "application/vnd.google-apps.folder"
|
|
|
|
|
|
|
|
func (s *GDrive) setupRoot() error {
|
|
|
|
rootFileConfig := filepath.Join(s.localConfigPath, GDriveRootConfigFile)
|
|
|
|
|
|
|
|
rootId, err := ioutil.ReadFile(rootFileConfig)
|
|
|
|
if err != nil && !os.IsNotExist(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if string(rootId) != "" {
|
|
|
|
s.rootId = string(rootId)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
dir := &drive.File{
|
2018-07-07 20:23:50 +02:00
|
|
|
Name: s.basedir,
|
|
|
|
MimeType: GDriveDirectoryMimeType,
|
2018-06-19 15:30:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
di, err := s.service.Files.Create(dir).Fields("id").Do()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
s.rootId = di.Id
|
2018-07-07 20:23:50 +02:00
|
|
|
err = ioutil.WriteFile(rootFileConfig, []byte(s.rootId), os.FileMode(0600))
|
2018-06-19 15:30:26 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// hasChecksum reports whether Drive has an MD5 checksum for f. The Get path
// treats a missing checksum as "file not found".
func (s *GDrive) hasChecksum(f *drive.File) bool {
	return f.Md5Checksum != ""
}
|
|
|
|
|
2018-07-07 20:23:50 +02:00
|
|
|
// list runs one page of a Drive files.list query q, starting at
// nextPageToken, selecting only id, name and mimeType for each file.
func (s *GDrive) list(nextPageToken string, q string) (*drive.FileList, error) {
	return s.service.Files.List().Fields("nextPageToken, files(id, name, mimeType)").Q(q).PageToken(nextPageToken).Do()
}
|
|
|
|
|
|
|
|
func (s *GDrive) findId(filename string, token string) (string, error) {
|
2018-07-13 06:29:34 +02:00
|
|
|
filename = strings.Replace(filename, `'`, `\'`, -1)
|
|
|
|
filename = strings.Replace(filename, `"`, `\"`, -1)
|
|
|
|
|
2018-06-19 15:30:26 +02:00
|
|
|
fileId, tokenId, nextPageToken := "", "", ""
|
|
|
|
|
|
|
|
q := fmt.Sprintf("'%s' in parents and name='%s' and mimeType='%s' and trashed=false", s.rootId, token, GDriveDirectoryMimeType)
|
|
|
|
l, err := s.list(nextPageToken, q)
|
2019-03-18 19:09:22 +01:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2018-06-19 15:30:26 +02:00
|
|
|
|
2019-03-18 19:09:22 +01:00
|
|
|
for 0 < len(l.Files) {
|
2018-06-19 15:30:26 +02:00
|
|
|
for _, fi := range l.Files {
|
|
|
|
tokenId = fi.Id
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if l.NextPageToken == "" {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
l, err = s.list(l.NextPageToken, q)
|
|
|
|
}
|
|
|
|
|
|
|
|
if filename == "" {
|
|
|
|
return tokenId, nil
|
|
|
|
} else if tokenId == "" {
|
|
|
|
return "", fmt.Errorf("Cannot find file %s/%s", token, filename)
|
|
|
|
}
|
|
|
|
|
|
|
|
q = fmt.Sprintf("'%s' in parents and name='%s' and mimeType!='%s' and trashed=false", tokenId, filename, GDriveDirectoryMimeType)
|
|
|
|
l, err = s.list(nextPageToken, q)
|
2019-03-18 19:09:22 +01:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2018-06-19 15:30:26 +02:00
|
|
|
|
|
|
|
for 0 < len(l.Files) {
|
|
|
|
for _, fi := range l.Files {
|
|
|
|
|
|
|
|
fileId = fi.Id
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if l.NextPageToken == "" {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
l, err = s.list(l.NextPageToken, q)
|
|
|
|
}
|
|
|
|
|
|
|
|
if fileId == "" {
|
|
|
|
return "", fmt.Errorf("Cannot find file %s/%s", token, filename)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fileId, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Type identifies this backend as Google Drive storage.
func (s *GDrive) Type() string {
	return "gdrive"
}
|
|
|
|
|
|
|
|
func (s *GDrive) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
|
|
|
|
var fileId string
|
|
|
|
fileId, err = s.findId(filename, token)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var fi *drive.File
|
|
|
|
if fi, err = s.service.Files.Get(fileId).Fields("mimeType", "size").Do(); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
contentLength = uint64(fi.Size)
|
|
|
|
|
|
|
|
contentType = fi.MimeType
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
|
|
|
|
var fileId string
|
|
|
|
fileId, err = s.findId(filename, token)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var fi *drive.File
|
|
|
|
fi, err = s.service.Files.Get(fileId).Fields("mimeType", "size", "md5Checksum").Do()
|
|
|
|
if !s.hasChecksum(fi) {
|
|
|
|
err = fmt.Errorf("Cannot find file %s/%s", token, filename)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
contentLength = uint64(fi.Size)
|
|
|
|
contentType = fi.MimeType
|
|
|
|
|
2018-06-23 13:12:00 +02:00
|
|
|
ctx := context.Background()
|
2018-06-19 15:30:26 +02:00
|
|
|
var res *http.Response
|
|
|
|
res, err = s.service.Files.Get(fileId).Context(ctx).Download()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-06-23 13:12:00 +02:00
|
|
|
reader = res.Body
|
2018-06-19 15:30:26 +02:00
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-06-24 06:46:57 +02:00
|
|
|
func (s *GDrive) Delete(token string, filename string) (err error) {
|
|
|
|
metadata, _ := s.findId(fmt.Sprintf("%s.metadata", filename), token)
|
|
|
|
s.service.Files.Delete(metadata).Do()
|
|
|
|
|
|
|
|
var fileId string
|
|
|
|
fileId, err = s.findId(filename, token)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
err = s.service.Files.Delete(fileId).Do()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-06-19 15:30:26 +02:00
|
|
|
func (s *GDrive) IsNotExist(err error) bool {
|
|
|
|
if err != nil {
|
|
|
|
if e, ok := err.(*googleapi.Error); ok {
|
|
|
|
return e.Code == http.StatusNotFound
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *GDrive) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
|
|
|
|
dirId, err := s.findId("", token)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if dirId == "" {
|
|
|
|
dir := &drive.File{
|
2018-07-07 20:23:50 +02:00
|
|
|
Name: token,
|
|
|
|
Parents: []string{s.rootId},
|
|
|
|
MimeType: GDriveDirectoryMimeType,
|
2018-06-19 15:30:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
di, err := s.service.Files.Create(dir).Fields("id").Do()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
dirId = di.Id
|
|
|
|
}
|
|
|
|
|
|
|
|
// Instantiate empty drive file
|
|
|
|
dst := &drive.File{
|
2018-07-07 20:23:50 +02:00
|
|
|
Name: filename,
|
|
|
|
Parents: []string{dirId},
|
2018-06-19 15:30:26 +02:00
|
|
|
MimeType: contentType,
|
|
|
|
}
|
|
|
|
|
2018-06-23 13:12:00 +02:00
|
|
|
ctx := context.Background()
|
2019-03-18 20:52:38 +01:00
|
|
|
_, err = s.service.Files.Create(dst).Context(ctx).Media(reader, googleapi.ChunkSize(s.chunkSize)).Do()
|
2018-06-23 13:12:00 +02:00
|
|
|
|
2018-06-19 15:30:26 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Retrieve a token, saves the token, then returns the generated client.
|
2018-10-27 00:15:55 +02:00
|
|
|
func getGDriveClient(config *oauth2.Config, localConfigPath string, logger *log.Logger) *http.Client {
|
2018-10-26 23:28:47 +02:00
|
|
|
tokenFile := filepath.Join(localConfigPath, GDriveTokenJsonFile)
|
2018-06-19 15:30:26 +02:00
|
|
|
tok, err := gDriveTokenFromFile(tokenFile)
|
|
|
|
if err != nil {
|
2018-10-27 00:15:55 +02:00
|
|
|
tok = getGDriveTokenFromWeb(config, logger)
|
|
|
|
saveGDriveToken(tokenFile, tok, logger)
|
2018-06-19 15:30:26 +02:00
|
|
|
}
|
2018-10-26 23:28:47 +02:00
|
|
|
|
2018-06-19 15:30:26 +02:00
|
|
|
return config.Client(context.Background(), tok)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Request a token from the web, then returns the retrieved token.
|
2018-10-27 00:15:55 +02:00
|
|
|
func getGDriveTokenFromWeb(config *oauth2.Config, logger *log.Logger) *oauth2.Token {
|
2018-06-19 15:30:26 +02:00
|
|
|
authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
|
|
|
|
fmt.Printf("Go to the following link in your browser then type the "+
|
|
|
|
"authorization code: \n%v\n", authURL)
|
|
|
|
|
|
|
|
var authCode string
|
|
|
|
if _, err := fmt.Scan(&authCode); err != nil {
|
2018-10-27 00:15:55 +02:00
|
|
|
logger.Fatalf("Unable to read authorization code %v", err)
|
2018-06-19 15:30:26 +02:00
|
|
|
}
|
|
|
|
|
2018-10-26 23:28:47 +02:00
|
|
|
tok, err := config.Exchange(context.TODO(), authCode)
|
2018-06-19 15:30:26 +02:00
|
|
|
if err != nil {
|
2018-10-27 00:15:55 +02:00
|
|
|
logger.Fatalf("Unable to retrieve token from web %v", err)
|
2018-06-19 15:30:26 +02:00
|
|
|
}
|
|
|
|
return tok
|
|
|
|
}
|
|
|
|
|
|
|
|
// Retrieves a token from a local file.
|
|
|
|
func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
|
|
|
|
f, err := os.Open(file)
|
|
|
|
defer f.Close()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
tok := &oauth2.Token{}
|
|
|
|
err = json.NewDecoder(f).Decode(tok)
|
|
|
|
return tok, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Saves a token to a file path.
|
2018-10-27 00:15:55 +02:00
|
|
|
func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
|
|
|
|
logger.Printf("Saving credential file to: %s\n", path)
|
2018-06-19 15:30:26 +02:00
|
|
|
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
|
|
|
|
defer f.Close()
|
|
|
|
if err != nil {
|
2018-10-27 00:15:55 +02:00
|
|
|
logger.Fatalf("Unable to cache oauth token: %v", err)
|
2018-06-19 15:30:26 +02:00
|
|
|
}
|
2018-10-27 00:15:55 +02:00
|
|
|
|
2018-06-19 15:30:26 +02:00
|
|
|
json.NewEncoder(f).Encode(token)
|
|
|
|
}
|
2019-08-28 15:18:00 +02:00
|
|
|
|
|
|
|
// StorjStorage implements Storage on the Storj decentralized network via
// the uplink library. Objects are keyed as "token/filename".
type StorjStorage struct {
	Storage
	// uplink is the underlying uplink instance.
	uplink *uplink.Uplink
	// project is the opened Storj project.
	project *uplink.Project
	// bucket is the opened bucket all objects are stored in.
	bucket *uplink.Bucket
	logger *log.Logger
}
|
|
|
|
|
2019-08-28 15:29:35 +02:00
|
|
|
func NewStorjStorage(endpoint, apiKey, bucket, encKey string, logger *log.Logger) (*StorjStorage, error) {
|
2019-08-28 15:18:00 +02:00
|
|
|
var instance StorjStorage
|
|
|
|
var err error
|
|
|
|
|
2019-08-29 16:33:37 +02:00
|
|
|
ctx := context.TODO()
|
2019-08-28 15:18:00 +02:00
|
|
|
|
|
|
|
instance.uplink, err = uplink.NewUplink(ctx, nil)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("could not create new Uplink Instance: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
key, err := uplink.ParseAPIKey(apiKey)
|
|
|
|
if err != nil {
|
2019-08-29 16:33:37 +02:00
|
|
|
return nil, fmt.Errorf("could not parse api key: %v", err)
|
2019-08-28 15:18:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
instance.project, err = instance.uplink.OpenProject(ctx, endpoint, key)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("could not open project: %v", err)
|
|
|
|
}
|
|
|
|
|
2019-08-29 16:33:37 +02:00
|
|
|
saltenckey, err := instance.project.SaltedKeyFromPassphrase(ctx, encKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("could not generate salted enc key: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
access := uplink.NewEncryptionAccessWithDefaultKey(*saltenckey)
|
2019-08-28 15:18:00 +02:00
|
|
|
instance.bucket, err = instance.project.OpenBucket(ctx, bucket, access)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("could not open bucket %q: %v", bucket, err)
|
|
|
|
}
|
2019-08-28 15:29:35 +02:00
|
|
|
instance.logger = logger
|
2019-08-28 15:18:00 +02:00
|
|
|
|
|
|
|
return &instance, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Type identifies this backend as Storj storage.
func (s *StorjStorage) Type() string {
	return "storj"
}
|
|
|
|
|
|
|
|
func (s *StorjStorage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
|
|
|
|
ctx := context.TODO()
|
|
|
|
|
|
|
|
obj, err := s.bucket.OpenObject(ctx, key)
|
|
|
|
if err != nil {
|
|
|
|
return "", 0, fmt.Errorf("unable to open object %v", err)
|
|
|
|
}
|
|
|
|
contentType = obj.Meta.ContentType
|
|
|
|
contentLength = uint64(obj.Meta.Size)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *StorjStorage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
ctx := context.TODO()
|
|
|
|
|
|
|
|
obj, err := s.bucket.OpenObject(ctx, key)
|
|
|
|
if err != nil {
|
|
|
|
return nil, "", 0, fmt.Errorf("unable to open object %v", err)
|
|
|
|
}
|
|
|
|
contentType = obj.Meta.ContentType
|
|
|
|
contentLength = uint64(obj.Meta.Size)
|
|
|
|
reader, err = obj.DownloadRange(ctx, 0, -1)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *StorjStorage) Delete(token string, filename string) (err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
|
|
|
|
ctx := context.TODO()
|
|
|
|
|
|
|
|
err = s.bucket.DeleteObject(ctx, key)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *StorjStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
|
|
|
|
key := fmt.Sprintf("%s/%s", token, filename)
|
|
|
|
|
|
|
|
s.logger.Printf("Uploading file %s to S3 Bucket", filename)
|
|
|
|
|
|
|
|
ctx := context.TODO()
|
|
|
|
|
|
|
|
err = s.bucket.UploadObject(ctx, key, reader, &uplink.UploadOptions{ContentType: contentType})
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not upload: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func toStorjKey(key string) (newKey storj.Key) {
|
|
|
|
var encryptionKey storj.Key
|
|
|
|
copy(encryptionKey[:], []byte(key))
|
|
|
|
return encryptionKey
|
|
|
|
}
|