Mirror of https://github.com/dutchcoders/transfer.sh.git (synced 2024-11-27 06:30:19 +01:00)
Fix s3 100MB max upload issue.
parent ce043efca0
commit 372ff5f368
4 changed files with 13 additions and 6 deletions
@@ -165,6 +165,7 @@ aws-secret-key | aws access key | | AWS_SECRET_KEY
 bucket | aws bucket | | BUCKET
 s3-region | region of the s3 bucket | eu-west-1 | S3_REGION
 s3-no-multipart | disables s3 multipart upload | false | |
+s3-part-size | Size of parts for S3 multipart upload. | 5(MB) | |
 s3-path-style | Forces path style URLs, required for Minio. | false | |
 basedir | path storage for local/gdrive provider| |
 gdrive-client-json-filepath | path to oauth client json config for gdrive provider| |
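For sizing intuition (not part of this commit): S3 allows at most 10,000 parts per multipart upload, so the value chosen for s3-part-size bounds the largest object the server can accept. A rough helper, with a hypothetical name, might look like this:

package main

import "fmt"

// minPartSizeMB is a hypothetical helper (not in transfer.sh) returning the
// smallest --s3-part-size value, in MB, that fits objectBytes into S3's
// 10,000-part limit, clamped to S3's 5 MB minimum part size.
func minPartSizeMB(objectBytes int64) int64 {
	const maxParts = 10000
	perPart := (objectBytes + maxParts - 1) / maxParts // ceiling division
	mb := (perPart + (1 << 20) - 1) >> 20              // round up to whole MB
	if mb < 5 {
		mb = 5
	}
	return mb
}

func main() {
	// A ~100 GiB upload needs parts of at least 11 MB.
	fmt.Println(minPartSizeMB(100 << 30)) // prints 11
}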
@ -130,6 +130,11 @@ var globalFlags = []cli.Flag{
|
||||||
Name: "s3-no-multipart",
|
Name: "s3-no-multipart",
|
||||||
Usage: "Disables S3 Multipart Puts",
|
Usage: "Disables S3 Multipart Puts",
|
||||||
},
|
},
|
||||||
|
cli.Int64Flag{
|
||||||
|
Name: "s3-part-size",
|
||||||
|
Usage: "Size of parts for S3 multipart upload, default 5(MB)",
|
||||||
|
Value: 5,
|
||||||
|
},
|
||||||
cli.BoolFlag{
|
cli.BoolFlag{
|
||||||
Name: "s3-path-style",
|
Name: "s3-path-style",
|
||||||
Usage: "Forces path style URLs, required for Minio.",
|
Usage: "Forces path style URLs, required for Minio.",
|
||||||
@@ -343,7 +348,7 @@ func New() *Cmd {
 			panic("secret-key not set.")
 		} else if bucket := c.String("bucket"); bucket == "" {
 			panic("bucket not set.")
-		} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, c.String("s3-region"), c.String("s3-endpoint"), logger, c.Bool("s3-no-multipart"), c.Bool("s3-path-style")); err != nil {
+		} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, c.String("s3-region"), c.String("s3-endpoint"), logger, c.Bool("s3-no-multipart"), c.Int64("s3-part-size"), c.Bool("s3-path-style")); err != nil {
 			panic(err)
 		} else {
 			options = append(options, server.UseStorage(storage))
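The new flag is a plain urfave/cli Int64Flag whose value (in MB) is read back with c.Int64("s3-part-size") and handed to the storage constructor. A minimal, self-contained sketch of that flow, assuming the v1 urfave/cli API (the import path and the wiring here are illustrative, not taken from this commit):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli" // assumption: the v1 API, matching cli.Int64Flag above
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.Int64Flag{
			Name:  "s3-part-size",
			Usage: "Size of parts for S3 multipart upload, default 5(MB)",
			Value: 5,
		},
	}
	app.Action = func(c *cli.Context) error {
		partSizeMB := c.Int64("s3-part-size") // same lookup as in the hunk above
		fmt.Printf("part size: %d MB (%d bytes)\n", partSizeMB, partSizeMB*1024*1024)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}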
@@ -130,12 +130,13 @@ type S3Storage struct {
 	s3          *s3.S3
 	logger      *log.Logger
 	noMultipart bool
+	partSize    int64
 }
 
-func NewS3Storage(accessKey, secretKey, bucketName, region, endpoint string, logger *log.Logger, disableMultipart bool, forcePathStyle bool) (*S3Storage, error) {
+func NewS3Storage(accessKey, secretKey, bucketName, region, endpoint string, logger *log.Logger, disableMultipart bool, partSize int64, forcePathStyle bool) (*S3Storage, error) {
 	sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)
 
-	return &S3Storage{bucket: bucketName, s3: s3.New(sess), session: sess, logger: logger, noMultipart: disableMultipart}, nil
+	return &S3Storage{bucket: bucketName, s3: s3.New(sess), session: sess, logger: logger, noMultipart: disableMultipart, partSize: partSize}, nil
 }
 
 func (s *S3Storage) Type() string {
@@ -243,9 +244,8 @@ func (s *S3Storage) Put(token string, filename string, reader io.Reader, content
 
 	// Create an uploader with the session and custom options
 	uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
-		u.PartSize = (1 << 20) * 5 // The minimum/default allowed part size is 5MB
+		u.PartSize = s.partSize * 1024 * 1024 // The minimum/default allowed part size is 5MB
 		u.Concurrency = concurrency // default is 5
-		u.MaxUploadParts = concurrency
 		u.LeavePartsOnError = false
 	})
 
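This hunk is the actual fix. The uploader previously forced u.MaxUploadParts to the upload concurrency, so an object could never span more than that many 5 MB parts; judging by the 100MB figure in the commit title, the concurrency was 20, giving a ceiling of 20 x 5 MB = 100 MB. Dropping the override restores the SDK default of 10,000 parts (s3manager.MaxUploadParts), and the part size itself is now configurable. A small illustrative check of the before/after ceilings (the concurrency value is inferred, not read from this diff):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	const partSize = 5 * (1 << 20) // the old hard-coded 5 MB part size

	// Before: MaxUploadParts was pinned to the concurrency (assumed 20 here).
	fmt.Println("old ceiling:", 20*partSize/(1<<20), "MB") // 100 MB

	// After: the SDK default of 10,000 parts applies.
	fmt.Println("new ceiling:", int64(s3manager.MaxUploadParts)*partSize/(1<<30), "GiB") // ~48 GiB
}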
@@ -43,6 +43,7 @@ func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle
 		Endpoint:         aws.String(endpoint),
 		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
 		S3ForcePathStyle: aws.Bool(forcePathStyle),
+		//LogLevel: aws.LogLevel(aws.LogDebug),
 	}))
 }
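The last hunk only adds a commented-out LogLevel entry, a convenient toggle when debugging S3 requests. A minimal sketch of what enabling it looks like, assuming the aws-sdk-go v1 session helpers already used here (the region and path-style values are illustrative):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("eu-west-1"),
		S3ForcePathStyle: aws.Bool(true),
		// aws.LogDebug logs request signing and retries; LogDebugWithHTTPBody
		// additionally dumps request and response bodies.
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
	}))
	_ = sess // pass to s3.New / s3manager.NewUploader as in the code above
}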