2014-10-16 20:01:43 +02:00
|
|
|
/*
|
|
|
|
The MIT License (MIT)
|
|
|
|
|
2017-03-23 00:02:36 +01:00
|
|
|
Copyright (c) 2014-2017 DutchCoders [https://github.com/dutchcoders/]
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
of this software and associated documentation files (the "Software"), to deal
|
|
|
|
in the Software without restriction, including without limitation the rights
|
|
|
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
copies of the Software, and to permit persons to whom the Software is
|
|
|
|
furnished to do so, subject to the following conditions:
|
|
|
|
|
|
|
|
The above copyright notice and this permission notice shall be included in
|
|
|
|
all copies or substantial portions of the Software.
|
|
|
|
|
|
|
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
package server
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
// _ "transfer.sh/app/handlers"
|
|
|
|
// _ "transfer.sh/app/utils"
|
|
|
|
|
|
|
|
"archive/tar"
|
|
|
|
"archive/zip"
|
|
|
|
"bytes"
|
|
|
|
"compress/gzip"
|
2017-03-28 16:12:31 +02:00
|
|
|
"encoding/json"
|
2014-10-16 20:01:43 +02:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
2019-05-11 15:12:38 +02:00
|
|
|
blackfriday "gopkg.in/russross/blackfriday.v2"
|
2016-02-19 10:40:26 +01:00
|
|
|
"html"
|
2014-10-20 14:54:42 +02:00
|
|
|
html_template "html/template"
|
2014-10-16 20:01:43 +02:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"log"
|
|
|
|
"math/rand"
|
|
|
|
"mime"
|
|
|
|
"net/http"
|
2017-03-22 22:23:29 +01:00
|
|
|
"net/url"
|
2014-10-16 20:01:43 +02:00
|
|
|
"os"
|
2017-03-22 22:23:29 +01:00
|
|
|
"path"
|
2014-10-16 20:01:43 +02:00
|
|
|
"path/filepath"
|
2014-10-20 14:54:42 +02:00
|
|
|
"strconv"
|
2014-10-16 20:01:43 +02:00
|
|
|
"strings"
|
2017-03-28 16:12:31 +02:00
|
|
|
"sync"
|
2014-10-16 20:01:43 +02:00
|
|
|
text_template "text/template"
|
2014-10-20 14:54:42 +02:00
|
|
|
"time"
|
2016-01-14 09:33:02 +01:00
|
|
|
|
2017-03-22 22:23:29 +01:00
|
|
|
"net"
|
|
|
|
|
2019-05-11 14:42:59 +02:00
|
|
|
"encoding/base64"
|
2017-03-22 18:09:21 +01:00
|
|
|
web "github.com/dutchcoders/transfer.sh-web"
|
2016-01-14 09:33:02 +01:00
|
|
|
"github.com/gorilla/mux"
|
2019-05-11 14:42:59 +02:00
|
|
|
"github.com/microcosm-cc/bluemonday"
|
|
|
|
"github.com/skip2/go-qrcode"
|
2014-10-16 20:01:43 +02:00
|
|
|
)
|
|
|
|
|
2019-05-18 14:13:23 +02:00
|
|
|
// getPathPart is the URL path segment marking inline "get" downloads
// (e.g. /get/<token>/<filename>).
const getPathPart = "get"
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
var (
	// Template sets shared by all handlers, built once at package load time.
	htmlTemplates = initHTMLTemplates()
	textTemplates = initTextTemplates()
)
|
|
|
|
|
|
|
|
func stripPrefix(path string) string {
|
|
|
|
return strings.Replace(path, web.Prefix+"/", "", -1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func initTextTemplates() *text_template.Template {
|
|
|
|
templateMap := text_template.FuncMap{"format": formatNumber}
|
|
|
|
|
|
|
|
// Templates with functions available to them
|
|
|
|
var templates = text_template.New("").Funcs(templateMap)
|
|
|
|
return templates
|
|
|
|
}
|
|
|
|
|
|
|
|
func initHTMLTemplates() *html_template.Template {
|
|
|
|
templateMap := html_template.FuncMap{"format": formatNumber}
|
|
|
|
|
|
|
|
// Templates with functions available to them
|
|
|
|
var templates = html_template.New("").Funcs(templateMap)
|
|
|
|
|
|
|
|
return templates
|
|
|
|
}
|
|
|
|
|
2014-10-16 20:01:43 +02:00
|
|
|
func healthHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
fmt.Fprintf(w, "Approaching Neutral Zone, all systems normal and functioning.")
|
|
|
|
}
|
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
/* The preview handler will show a preview of the content for browsers (accept type text/html), and referer is not transfer.sh */
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) previewHandler(w http.ResponseWriter, r *http.Request) {
|
2014-11-13 21:41:43 +01:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
|
|
|
token := vars["token"]
|
|
|
|
filename := vars["filename"]
|
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
contentType, contentLength, err := s.storage.Head(token, filename)
|
2014-11-13 21:41:43 +01:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, http.StatusText(404), 404)
|
|
|
|
return
|
|
|
|
}
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
var templatePath string
|
|
|
|
var content html_template.HTML
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
switch {
|
|
|
|
case strings.HasPrefix(contentType, "image/"):
|
|
|
|
templatePath = "download.image.html"
|
|
|
|
case strings.HasPrefix(contentType, "video/"):
|
|
|
|
templatePath = "download.video.html"
|
|
|
|
case strings.HasPrefix(contentType, "audio/"):
|
|
|
|
templatePath = "download.audio.html"
|
|
|
|
case strings.HasPrefix(contentType, "text/"):
|
|
|
|
templatePath = "download.markdown.html"
|
|
|
|
|
|
|
|
var reader io.ReadCloser
|
2017-03-22 22:27:26 +01:00
|
|
|
if reader, _, _, err = s.storage.Get(token, filename); err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
var data []byte
|
2019-04-28 21:36:45 +02:00
|
|
|
data = make([]byte, _5M)
|
|
|
|
if _, err = reader.Read(data); err != io.EOF && err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
2014-11-13 21:41:43 +01:00
|
|
|
|
|
|
|
if strings.HasPrefix(contentType, "text/x-markdown") || strings.HasPrefix(contentType, "text/markdown") {
|
2019-05-11 14:47:58 +02:00
|
|
|
unsafe := blackfriday.Run(data)
|
2019-05-11 14:42:59 +02:00
|
|
|
output := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
|
2014-11-13 21:41:43 +01:00
|
|
|
content = html_template.HTML(output)
|
|
|
|
} else if strings.HasPrefix(contentType, "text/plain") {
|
2016-02-19 10:40:26 +01:00
|
|
|
content = html_template.HTML(fmt.Sprintf("<pre>%s</pre>", html.EscapeString(string(data))))
|
2014-11-13 21:41:43 +01:00
|
|
|
} else {
|
2016-01-14 09:33:02 +01:00
|
|
|
templatePath = "download.sandbox.html"
|
2014-11-13 21:41:43 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
templatePath = "download.html"
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
|
|
|
|
2019-05-18 14:13:23 +02:00
|
|
|
relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
|
|
|
|
resolvedURL := resolveURL(r, getURL(r).ResolveReference(relativeURL), true)
|
|
|
|
relativeURLGet, _ := url.Parse(path.Join(s.proxyPath, getPathPart, token, filename))
|
|
|
|
resolvedURLGet := resolveURL(r, getURL(r).ResolveReference(relativeURLGet), true)
|
2018-07-01 13:51:13 +02:00
|
|
|
var png []byte
|
2019-05-18 14:13:23 +02:00
|
|
|
png, err = qrcode.Encode(resolvedURL, qrcode.High, 150)
|
2018-07-01 13:51:13 +02:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
qrCode := base64.StdEncoding.EncodeToString(png)
|
|
|
|
|
2018-12-15 21:16:30 +01:00
|
|
|
hostname := getURL(r).Host
|
2019-03-30 12:35:57 +01:00
|
|
|
webAddress := resolveWebAddress(r, s.proxyPath)
|
2018-12-15 21:16:30 +01:00
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
data := struct {
|
|
|
|
ContentType string
|
|
|
|
Content html_template.HTML
|
|
|
|
Filename string
|
|
|
|
Url string
|
2019-05-18 14:13:23 +02:00
|
|
|
UrlGet string
|
2018-12-15 21:16:30 +01:00
|
|
|
Hostname string
|
|
|
|
WebAddress string
|
2014-11-13 21:41:43 +01:00
|
|
|
ContentLength uint64
|
2018-07-07 20:23:50 +02:00
|
|
|
GAKey string
|
2018-06-26 18:39:56 +02:00
|
|
|
UserVoiceKey string
|
2018-07-07 20:23:50 +02:00
|
|
|
QRCode string
|
2014-11-13 21:41:43 +01:00
|
|
|
}{
|
|
|
|
contentType,
|
|
|
|
content,
|
|
|
|
filename,
|
2019-05-18 14:13:23 +02:00
|
|
|
resolvedURL,
|
|
|
|
resolvedURLGet,
|
2018-12-15 21:16:30 +01:00
|
|
|
hostname,
|
|
|
|
webAddress,
|
2014-11-13 21:41:43 +01:00
|
|
|
contentLength,
|
2018-06-26 18:39:56 +02:00
|
|
|
s.gaKey,
|
|
|
|
s.userVoiceKey,
|
2018-07-01 13:51:13 +02:00
|
|
|
qrCode,
|
2014-11-13 21:41:43 +01:00
|
|
|
}
|
|
|
|
|
2017-03-22 22:25:42 +01:00
|
|
|
if err := htmlTemplates.ExecuteTemplate(w, templatePath, data); err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// this handler will output html or text, depending on the
|
|
|
|
// support of the client (Accept header).
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) viewHandler(w http.ResponseWriter, r *http.Request) {
|
2014-11-13 21:41:43 +01:00
|
|
|
// vars := mux.Vars(r)
|
|
|
|
|
2018-12-15 21:16:30 +01:00
|
|
|
hostname := getURL(r).Host
|
2019-03-30 12:35:57 +01:00
|
|
|
webAddress := resolveWebAddress(r, s.proxyPath)
|
2018-12-15 21:16:30 +01:00
|
|
|
|
|
|
|
data := struct {
|
2018-12-15 21:42:18 +01:00
|
|
|
Hostname string
|
|
|
|
WebAddress string
|
|
|
|
GAKey string
|
|
|
|
UserVoiceKey string
|
2018-12-15 21:16:30 +01:00
|
|
|
}{
|
|
|
|
hostname,
|
|
|
|
webAddress,
|
|
|
|
s.gaKey,
|
2018-12-15 21:42:18 +01:00
|
|
|
s.userVoiceKey,
|
2018-12-15 21:16:30 +01:00
|
|
|
}
|
|
|
|
|
2017-03-22 21:09:40 +01:00
|
|
|
if acceptsHTML(r.Header) {
|
2018-12-15 21:16:30 +01:00
|
|
|
if err := htmlTemplates.ExecuteTemplate(w, "index.html", data); err != nil {
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
2018-12-15 21:16:30 +01:00
|
|
|
if err := textTemplates.ExecuteTemplate(w, "index.txt", data); err != nil {
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) notFoundHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-20 14:54:42 +02:00
|
|
|
http.Error(w, http.StatusText(404), 404)
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
|
|
|
|
2017-03-23 11:46:59 +01:00
|
|
|
// sanitize reduces fileName to its final path element so uploaded names
// cannot contain directory components and escape the storage namespace.
func sanitize(fileName string) string {
	base := path.Base(fileName)
	return path.Clean(base)
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) postHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-16 20:01:43 +02:00
|
|
|
if err := r.ParseMultipartForm(_24K); nil != err {
|
2014-11-13 21:41:43 +01:00
|
|
|
log.Printf("%s", err.Error())
|
2016-08-10 20:50:36 +02:00
|
|
|
http.Error(w, "Error occurred copying to output stream", 500)
|
2014-10-16 20:01:43 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
token := Encode(10000000 + int64(rand.Intn(1000000000)))
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "text/plain")
|
|
|
|
|
|
|
|
for _, fheaders := range r.MultipartForm.File {
|
|
|
|
for _, fheader := range fheaders {
|
2017-03-23 11:46:59 +01:00
|
|
|
filename := sanitize(fheader.Filename)
|
2014-10-16 20:01:43 +02:00
|
|
|
contentType := fheader.Header.Get("Content-Type")
|
|
|
|
|
|
|
|
if contentType == "" {
|
|
|
|
contentType = mime.TypeByExtension(filepath.Ext(fheader.Filename))
|
|
|
|
}
|
|
|
|
|
|
|
|
var f io.Reader
|
|
|
|
var err error
|
|
|
|
|
|
|
|
if f, err = fheader.Open(); err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
log.Printf("%s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var b bytes.Buffer
|
|
|
|
|
|
|
|
n, err := io.CopyN(&b, f, _24K+1)
|
|
|
|
if err != nil && err != io.EOF {
|
2014-11-13 21:41:43 +01:00
|
|
|
log.Printf("%s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-16 19:58:11 +01:00
|
|
|
var file *os.File
|
2014-10-16 20:01:43 +02:00
|
|
|
var reader io.Reader
|
|
|
|
|
|
|
|
if n > _24K {
|
2019-01-16 19:58:11 +01:00
|
|
|
file, err = ioutil.TempFile(s.tempPath, "transfer-")
|
2014-10-16 20:01:43 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
n, err = io.Copy(file, io.MultiReader(&b, f))
|
|
|
|
if err != nil {
|
2019-01-16 19:58:11 +01:00
|
|
|
cleanTmpFile(file)
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2014-11-13 21:41:43 +01:00
|
|
|
log.Printf("%s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
reader, err = os.Open(file.Name())
|
|
|
|
} else {
|
|
|
|
reader = bytes.NewReader(b.Bytes())
|
|
|
|
}
|
|
|
|
|
|
|
|
contentLength := n
|
|
|
|
|
2017-03-28 16:12:31 +02:00
|
|
|
metadata := MetadataForRequest(contentType, r)
|
|
|
|
|
|
|
|
buffer := &bytes.Buffer{}
|
|
|
|
if err := json.NewEncoder(buffer).Encode(metadata); err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, errors.New("Could not encode metadata").Error(), 500)
|
2019-01-16 19:58:11 +01:00
|
|
|
|
|
|
|
cleanTmpFile(file)
|
2017-03-28 16:12:31 +02:00
|
|
|
return
|
|
|
|
} else if err := s.storage.Put(token, fmt.Sprintf("%s.metadata", filename), buffer, "text/json", uint64(buffer.Len())); err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, errors.New("Could not save metadata").Error(), 500)
|
2019-01-16 19:58:11 +01:00
|
|
|
|
|
|
|
cleanTmpFile(file)
|
2017-03-28 16:12:31 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
log.Printf("Uploading %s %s %d %s", token, filename, contentLength, contentType)
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
if err = s.storage.Put(token, filename, reader, contentType, uint64(contentLength)); err != nil {
|
|
|
|
log.Printf("Backend storage error: %s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2019-06-13 16:35:10 +02:00
|
|
|
filename = url.PathEscape(filename)
|
2019-03-30 12:35:57 +01:00
|
|
|
relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
|
2018-09-15 13:24:11 +02:00
|
|
|
fmt.Fprintln(w, getURL(r).ResolveReference(relativeURL).String())
|
2019-01-16 19:58:11 +01:00
|
|
|
|
|
|
|
cleanTmpFile(file)
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-16 19:58:11 +01:00
|
|
|
func cleanTmpFile(f *os.File) {
|
|
|
|
if f != nil {
|
2019-04-28 21:36:45 +02:00
|
|
|
err := f.Close()
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("Error closing tmpfile: %s (%s)", err, f.Name())
|
|
|
|
}
|
|
|
|
|
|
|
|
err = os.Remove(f.Name())
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("Error removing tmpfile: %s (%s)", err, f.Name())
|
|
|
|
}
|
2019-01-16 19:58:11 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-28 16:12:31 +02:00
|
|
|
// Metadata is the per-upload bookkeeping record, stored next to each file
// as "<filename>.metadata" and consulted on every download and deletion.
type Metadata struct {
	// ContentType is the original uploading content type
	ContentType string
	// Secret as knowledge to delete file
	// Secret string
	// Downloads is the actual number of downloads
	Downloads int
	// MaxDownloads contains the maximum numbers of downloads
	MaxDownloads int
	// MaxDate contains the max age of the file
	MaxDate time.Time
	// DeletionToken contains the token to match against for deletion
	DeletionToken string
}
|
|
|
|
|
|
|
|
func MetadataForRequest(contentType string, r *http.Request) Metadata {
|
|
|
|
metadata := Metadata{
|
2018-06-24 06:46:57 +02:00
|
|
|
ContentType: contentType,
|
|
|
|
MaxDate: time.Now().Add(time.Hour * 24 * 365 * 10),
|
|
|
|
Downloads: 0,
|
|
|
|
MaxDownloads: 99999999,
|
2018-07-07 20:23:50 +02:00
|
|
|
DeletionToken: Encode(10000000+int64(rand.Intn(1000000000))) + Encode(10000000+int64(rand.Intn(1000000000))),
|
2017-03-28 16:12:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if v := r.Header.Get("Max-Downloads"); v == "" {
|
|
|
|
} else if v, err := strconv.Atoi(v); err != nil {
|
|
|
|
} else {
|
|
|
|
metadata.MaxDownloads = v
|
|
|
|
}
|
|
|
|
|
|
|
|
if v := r.Header.Get("Max-Days"); v == "" {
|
|
|
|
} else if v, err := strconv.Atoi(v); err != nil {
|
|
|
|
} else {
|
|
|
|
metadata.MaxDate = time.Now().Add(time.Hour * 24 * time.Duration(v))
|
|
|
|
}
|
|
|
|
|
|
|
|
return metadata
|
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-16 20:01:43 +02:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
2017-03-23 11:46:59 +01:00
|
|
|
filename := sanitize(vars["filename"])
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
contentLength := r.ContentLength
|
|
|
|
|
|
|
|
var reader io.Reader
|
|
|
|
|
|
|
|
reader = r.Body
|
|
|
|
|
2019-03-18 19:09:22 +01:00
|
|
|
defer r.Body.Close()
|
|
|
|
|
2014-10-16 20:01:43 +02:00
|
|
|
if contentLength == -1 {
|
|
|
|
// queue file to disk, because s3 needs content length
|
|
|
|
var err error
|
|
|
|
var f io.Reader
|
|
|
|
|
|
|
|
f = reader
|
|
|
|
|
|
|
|
var b bytes.Buffer
|
|
|
|
|
|
|
|
n, err := io.CopyN(&b, f, _24K+1)
|
|
|
|
if err != nil && err != io.EOF {
|
2017-03-22 22:28:40 +01:00
|
|
|
log.Printf("Error putting new file: %s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-16 19:58:11 +01:00
|
|
|
var file *os.File
|
|
|
|
|
2014-10-16 20:01:43 +02:00
|
|
|
if n > _24K {
|
2019-01-16 19:58:11 +01:00
|
|
|
file, err = ioutil.TempFile(s.tempPath, "transfer-")
|
2014-10-16 20:01:43 +02:00
|
|
|
if err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
log.Printf("%s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-16 19:58:11 +01:00
|
|
|
defer cleanTmpFile(file)
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
n, err = io.Copy(file, io.MultiReader(&b, f))
|
|
|
|
if err != nil {
|
2014-11-13 21:41:43 +01:00
|
|
|
log.Printf("%s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
reader, err = os.Open(file.Name())
|
|
|
|
} else {
|
|
|
|
reader = bytes.NewReader(b.Bytes())
|
|
|
|
}
|
|
|
|
|
|
|
|
contentLength = n
|
|
|
|
}
|
|
|
|
|
2018-07-07 09:55:46 +02:00
|
|
|
if contentLength == 0 {
|
|
|
|
log.Print("Empty content-length")
|
2019-03-30 02:30:12 +01:00
|
|
|
http.Error(w, errors.New("Could not upload empty file").Error(), 400)
|
2018-07-07 09:55:46 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-16 20:01:43 +02:00
|
|
|
contentType := r.Header.Get("Content-Type")
|
|
|
|
|
|
|
|
if contentType == "" {
|
|
|
|
contentType = mime.TypeByExtension(filepath.Ext(vars["filename"]))
|
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
token := Encode(10000000 + int64(rand.Intn(1000000000)))
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2017-03-28 16:12:31 +02:00
|
|
|
metadata := MetadataForRequest(contentType, r)
|
|
|
|
|
|
|
|
buffer := &bytes.Buffer{}
|
|
|
|
if err := json.NewEncoder(buffer).Encode(metadata); err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, errors.New("Could not encode metadata").Error(), 500)
|
|
|
|
return
|
|
|
|
} else if err := s.storage.Put(token, fmt.Sprintf("%s.metadata", filename), buffer, "text/json", uint64(buffer.Len())); err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, errors.New("Could not save metadata").Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-01-29 16:40:04 +01:00
|
|
|
log.Printf("Uploading %s %s %d %s", token, filename, contentLength, contentType)
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
var err error
|
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
if err = s.storage.Put(token, filename, reader, contentType, uint64(contentLength)); err != nil {
|
|
|
|
log.Printf("Error putting new file: %s", err.Error())
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, errors.New("Could not save file").Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// w.Statuscode = 200
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "text/plain")
|
|
|
|
|
2019-06-13 16:35:10 +02:00
|
|
|
filename = url.PathEscape(filename)
|
2019-03-30 12:35:57 +01:00
|
|
|
relativeURL, _ := url.Parse(path.Join(s.proxyPath, token, filename))
|
2019-05-18 14:13:23 +02:00
|
|
|
deleteURL, _ := url.Parse(path.Join(s.proxyPath, token, filename, metadata.DeletionToken))
|
2018-06-23 11:47:27 +02:00
|
|
|
|
2019-05-18 14:13:23 +02:00
|
|
|
w.Header().Set("X-Url-Delete", resolveURL(r, deleteURL, true))
|
2018-06-24 06:46:57 +02:00
|
|
|
|
2019-05-18 14:13:23 +02:00
|
|
|
fmt.Fprint(w, resolveURL(r, relativeURL, false))
|
2018-06-23 11:47:27 +02:00
|
|
|
}
|
|
|
|
|
2019-05-18 14:13:23 +02:00
|
|
|
func resolveURL(r *http.Request, u *url.URL, absolutePath bool) string {
|
2018-06-24 06:46:57 +02:00
|
|
|
if absolutePath {
|
|
|
|
r.URL.Path = ""
|
|
|
|
}
|
|
|
|
|
2018-06-23 11:47:27 +02:00
|
|
|
return getURL(r).ResolveReference(u).String()
|
2017-03-22 22:23:29 +01:00
|
|
|
}
|
|
|
|
|
2019-03-30 12:35:57 +01:00
|
|
|
// resolveKey normalizes a client-supplied "token/filename" key: a leading
// slash and the configured proxy-path prefix are stripped, and any
// backslashes become forward slashes.
func resolveKey(key, proxyPath string) string {
	key = strings.TrimPrefix(key, "/")
	key = strings.TrimPrefix(key, proxyPath)

	return strings.Replace(key, "\\", "/", -1)
}
|
|
|
|
|
|
|
|
func resolveWebAddress(r *http.Request, proxyPath string) string {
|
2018-12-15 21:16:30 +01:00
|
|
|
url := getURL(r)
|
|
|
|
|
2019-03-30 12:35:57 +01:00
|
|
|
var webAddress string
|
|
|
|
|
|
|
|
if len(proxyPath) == 0 {
|
2019-05-18 14:13:23 +02:00
|
|
|
webAddress = fmt.Sprintf("%s://%s/",
|
2019-03-30 12:35:57 +01:00
|
|
|
url.ResolveReference(url).Scheme,
|
|
|
|
url.ResolveReference(url).Host)
|
|
|
|
} else {
|
|
|
|
webAddress = fmt.Sprintf("%s://%s/%s",
|
|
|
|
url.ResolveReference(url).Scheme,
|
|
|
|
url.ResolveReference(url).Host,
|
|
|
|
proxyPath)
|
|
|
|
}
|
|
|
|
|
|
|
|
return webAddress
|
2018-12-15 21:16:30 +01:00
|
|
|
}
|
|
|
|
|
2017-03-22 22:23:29 +01:00
|
|
|
func getURL(r *http.Request) *url.URL {
|
2018-12-15 21:16:30 +01:00
|
|
|
u, _ := url.Parse(r.URL.String())
|
2017-03-22 22:23:29 +01:00
|
|
|
|
|
|
|
if r.TLS != nil {
|
|
|
|
u.Scheme = "https"
|
|
|
|
} else if proto := r.Header.Get("X-Forwarded-Proto"); proto != "" {
|
|
|
|
u.Scheme = proto
|
|
|
|
} else {
|
|
|
|
u.Scheme = "http"
|
|
|
|
}
|
|
|
|
|
|
|
|
if u.Host != "" {
|
|
|
|
} else if host, port, err := net.SplitHostPort(r.Host); err != nil {
|
2017-03-22 23:39:59 +01:00
|
|
|
u.Host = r.Host
|
2017-03-22 22:23:29 +01:00
|
|
|
} else {
|
|
|
|
if port == "80" && u.Scheme == "http" {
|
|
|
|
u.Host = host
|
|
|
|
} else if port == "443" && u.Scheme == "https" {
|
|
|
|
u.Host = host
|
|
|
|
} else {
|
|
|
|
u.Host = net.JoinHostPort(host, port)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-15 21:16:30 +01:00
|
|
|
return u
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
func calcRemainingLimits(metadata Metadata) (int, int) {
|
|
|
|
remainingDownloads := metadata.MaxDownloads - metadata.Downloads
|
|
|
|
timeDifference := metadata.MaxDate.Sub(time.Now())
|
|
|
|
remainingDays := int(timeDifference.Hours()/24) + 1
|
|
|
|
|
|
|
|
return remainingDownloads, remainingDays
|
|
|
|
}
|
|
|
|
|
2017-03-28 16:12:31 +02:00
|
|
|
// Lock serializes access to the stored object identified by token/filename
// by acquiring a per-key mutex, created lazily on first use.
//
// NOTE(review): the s.locks map itself is read and written here without any
// synchronization, so two goroutines taking a brand-new key concurrently
// race on the map — confirm callers, or guard the map with its own mutex.
func (s *Server) Lock(token, filename string) error {
	key := path.Join(token, filename)

	if _, ok := s.locks[key]; !ok {
		s.locks[key] = &sync.Mutex{}
	}

	s.locks[key].Lock()

	// Always nil; the error result exists only for signature symmetry.
	return nil
}
|
|
|
|
|
|
|
|
// Unlock releases the per-key mutex taken by Lock for token/filename.
//
// NOTE(review): assumes Lock was called first for the same key — if the
// entry is missing, the map lookup yields a nil *sync.Mutex and this
// panics. Always returns nil.
func (s *Server) Unlock(token, filename string) error {
	key := path.Join(token, filename)

	s.locks[key].Unlock()

	return nil
}
|
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
// CheckMetadata loads the metadata sidecar for token/filename, enforces the
// download-count and expiry-date limits, and (when increaseDownload is
// true) increments the download counter and writes the sidecar back — all
// under the per-key lock.
//
// A missing sidecar is not an error: the zero Metadata and nil are
// returned, so files uploaded before metadata existed remain downloadable.
func (s *Server) CheckMetadata(token, filename string, increaseDownload bool) (Metadata, error) {
	s.Lock(token, filename)
	defer s.Unlock(token, filename)

	var metadata Metadata

	r, _, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
	if s.storage.IsNotExist(err) {
		// No sidecar: treat as unrestricted.
		return metadata, nil
	} else if err != nil {
		return metadata, err
	}

	defer r.Close()

	if err := json.NewDecoder(r).Decode(&metadata); err != nil {
		return metadata, err
	} else if metadata.Downloads >= metadata.MaxDownloads {
		return metadata, errors.New("MaxDownloads expired.")
	} else if time.Now().After(metadata.MaxDate) {
		return metadata, errors.New("MaxDate expired.")
	} else {
		// todo(nl5887): mutex?

		// update number of downloads
		if increaseDownload {
			metadata.Downloads++
		}

		// Persist the updated counter back to the sidecar.
		buffer := &bytes.Buffer{}
		if err := json.NewEncoder(buffer).Encode(metadata); err != nil {
			return metadata, errors.New("Could not encode metadata")
		} else if err := s.storage.Put(token, fmt.Sprintf("%s.metadata", filename), buffer, "text/json", uint64(buffer.Len())); err != nil {
			return metadata, errors.New("Could not save metadata")
		}
	}

	return metadata, nil
}
|
|
|
|
|
2018-06-24 06:46:57 +02:00
|
|
|
// CheckDeletionToken verifies that deletionToken matches the token recorded
// in the metadata sidecar for token/filename, under the per-key lock.
//
// NOTE(review): a missing sidecar returns nil (deletion allowed) — this
// mirrors CheckMetadata's treatment of pre-metadata uploads, but it means
// such files can be deleted with any token; confirm that is intended.
func (s *Server) CheckDeletionToken(deletionToken, token, filename string) error {
	s.Lock(token, filename)
	defer s.Unlock(token, filename)

	var metadata Metadata

	r, _, _, err := s.storage.Get(token, fmt.Sprintf("%s.metadata", filename))
	if s.storage.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	defer r.Close()

	if err := json.NewDecoder(r).Decode(&metadata); err != nil {
		return err
	} else if metadata.DeletionToken != deletionToken {
		return errors.New("Deletion token doesn't match.")
	}

	return nil
}
|
|
|
|
|
|
|
|
func (s *Server) deleteHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
|
|
|
token := vars["token"]
|
|
|
|
filename := vars["filename"]
|
|
|
|
deletionToken := vars["deletionToken"]
|
|
|
|
|
|
|
|
if err := s.CheckDeletionToken(deletionToken, token, filename); err != nil {
|
|
|
|
log.Printf("Error metadata: %s", err.Error())
|
|
|
|
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
err := s.storage.Delete(token, filename)
|
|
|
|
if s.storage.IsNotExist(err) {
|
|
|
|
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
} else if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Could not delete file.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) zipHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-16 20:01:43 +02:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
|
|
|
files := vars["files"]
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
zipfilename := fmt.Sprintf("transfersh-%d.zip", uint16(time.Now().UnixNano()))
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/zip")
|
2014-10-20 13:38:40 +02:00
|
|
|
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", zipfilename))
|
2014-10-16 20:01:43 +02:00
|
|
|
w.Header().Set("Connection", "close")
|
|
|
|
|
|
|
|
zw := zip.NewWriter(w)
|
|
|
|
|
|
|
|
for _, key := range strings.Split(files, ",") {
|
2019-03-30 12:35:57 +01:00
|
|
|
key = resolveKey(key, s.proxyPath)
|
2014-11-13 21:41:43 +01:00
|
|
|
|
|
|
|
token := strings.Split(key, "/")[0]
|
2017-03-23 11:46:59 +01:00
|
|
|
filename := sanitize(strings.Split(key, "/")[1])
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
if _, err := s.CheckMetadata(token, filename, true); err != nil {
|
2017-03-28 16:12:31 +02:00
|
|
|
log.Printf("Error metadata: %s", err.Error())
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
reader, _, _, err := s.storage.Get(token, filename)
|
2014-11-13 21:41:43 +01:00
|
|
|
|
2014-10-16 20:01:43 +02:00
|
|
|
if err != nil {
|
2017-03-22 22:27:26 +01:00
|
|
|
if s.storage.IsNotExist(err) {
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, "File not found", 404)
|
|
|
|
return
|
|
|
|
} else {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Could not retrieve file.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
defer reader.Close()
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
header := &zip.FileHeader{
|
|
|
|
Name: strings.Split(key, "/")[1],
|
|
|
|
Method: zip.Store,
|
|
|
|
ModifiedTime: uint16(time.Now().UnixNano()),
|
|
|
|
ModifiedDate: uint16(time.Now().UnixNano()),
|
|
|
|
}
|
|
|
|
|
|
|
|
fw, err := zw.CreateHeader(header)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
if _, err = io.Copy(fw, reader); err != nil {
|
2014-10-16 20:01:43 +02:00
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := zw.Close(); err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-16 20:01:43 +02:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
|
|
|
files := vars["files"]
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
tarfilename := fmt.Sprintf("transfersh-%d.tar.gz", uint16(time.Now().UnixNano()))
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/x-gzip")
|
2014-10-20 13:38:40 +02:00
|
|
|
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", tarfilename))
|
2014-10-16 20:01:43 +02:00
|
|
|
w.Header().Set("Connection", "close")
|
|
|
|
|
|
|
|
os := gzip.NewWriter(w)
|
|
|
|
defer os.Close()
|
|
|
|
|
|
|
|
zw := tar.NewWriter(os)
|
|
|
|
defer zw.Close()
|
|
|
|
|
|
|
|
for _, key := range strings.Split(files, ",") {
|
2019-03-30 12:35:57 +01:00
|
|
|
key = resolveKey(key, s.proxyPath)
|
2014-11-13 21:41:43 +01:00
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
token := strings.Split(key, "/")[0]
|
2017-03-23 11:46:59 +01:00
|
|
|
filename := sanitize(strings.Split(key, "/")[1])
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
if _, err := s.CheckMetadata(token, filename, true); err != nil {
|
2017-03-28 16:12:31 +02:00
|
|
|
log.Printf("Error metadata: %s", err.Error())
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
reader, _, contentLength, err := s.storage.Get(token, filename)
|
2014-10-16 20:01:43 +02:00
|
|
|
if err != nil {
|
2017-03-22 22:27:26 +01:00
|
|
|
if s.storage.IsNotExist(err) {
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, "File not found", 404)
|
|
|
|
return
|
|
|
|
} else {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Could not retrieve file.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
defer reader.Close()
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
header := &tar.Header{
|
|
|
|
Name: strings.Split(key, "/")[1],
|
|
|
|
Size: int64(contentLength),
|
|
|
|
}
|
|
|
|
|
|
|
|
err = zw.WriteHeader(header)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
if _, err = io.Copy(zw, reader); err != nil {
|
2014-10-16 20:01:43 +02:00
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-16 20:01:43 +02:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
|
|
|
files := vars["files"]
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
tarfilename := fmt.Sprintf("transfersh-%d.tar", uint16(time.Now().UnixNano()))
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/x-tar")
|
2014-10-20 13:38:40 +02:00
|
|
|
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", tarfilename))
|
2014-10-16 20:01:43 +02:00
|
|
|
w.Header().Set("Connection", "close")
|
|
|
|
|
|
|
|
zw := tar.NewWriter(w)
|
|
|
|
defer zw.Close()
|
|
|
|
|
|
|
|
for _, key := range strings.Split(files, ",") {
|
2019-03-30 12:35:57 +01:00
|
|
|
key = resolveKey(key, s.proxyPath)
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
token := strings.Split(key, "/")[0]
|
|
|
|
filename := strings.Split(key, "/")[1]
|
2014-10-20 13:38:40 +02:00
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
if _, err := s.CheckMetadata(token, filename, true); err != nil {
|
2017-03-28 16:12:31 +02:00
|
|
|
log.Printf("Error metadata: %s", err.Error())
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
reader, _, contentLength, err := s.storage.Get(token, filename)
|
2014-10-16 20:01:43 +02:00
|
|
|
if err != nil {
|
2017-03-22 22:27:26 +01:00
|
|
|
if s.storage.IsNotExist(err) {
|
2014-10-16 20:01:43 +02:00
|
|
|
http.Error(w, "File not found", 404)
|
|
|
|
return
|
|
|
|
} else {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Could not retrieve file.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
defer reader.Close()
|
2014-10-16 20:01:43 +02:00
|
|
|
|
|
|
|
header := &tar.Header{
|
|
|
|
Name: strings.Split(key, "/")[1],
|
|
|
|
Size: int64(contentLength),
|
|
|
|
}
|
|
|
|
|
|
|
|
err = zw.WriteHeader(header)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-10-20 13:38:40 +02:00
|
|
|
if _, err = io.Copy(zw, reader); err != nil {
|
2014-10-16 20:01:43 +02:00
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Internal server error.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-13 08:56:08 +02:00
|
|
|
func (s *Server) headHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
|
|
|
token := vars["token"]
|
|
|
|
filename := vars["filename"]
|
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
metadata, err := s.CheckMetadata(token, filename, false)
|
|
|
|
|
|
|
|
if err != nil {
|
2018-08-13 08:56:08 +02:00
|
|
|
log.Printf("Error metadata: %s", err.Error())
|
|
|
|
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
contentType, contentLength, err := s.storage.Head(token, filename)
|
|
|
|
if s.storage.IsNotExist(err) {
|
|
|
|
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
} else if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Could not retrieve file.", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
remainingDownloads, remainingDays := calcRemainingLimits(metadata)
|
|
|
|
|
2018-08-13 08:56:08 +02:00
|
|
|
w.Header().Set("Content-Type", contentType)
|
|
|
|
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
|
|
|
|
w.Header().Set("Connection", "close")
|
2019-06-17 02:43:22 +02:00
|
|
|
w.Header().Set("X-Remaining-Downloads", strconv.Itoa(remainingDownloads))
|
|
|
|
w.Header().Set("X-Remaining-Days", strconv.Itoa(remainingDays))
|
2018-08-13 08:56:08 +02:00
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
|
2014-10-16 20:01:43 +02:00
|
|
|
vars := mux.Vars(r)
|
|
|
|
|
2018-08-13 08:56:08 +02:00
|
|
|
action := vars["action"]
|
2014-10-16 20:01:43 +02:00
|
|
|
token := vars["token"]
|
|
|
|
filename := vars["filename"]
|
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
metadata, err := s.CheckMetadata(token, filename, true)
|
|
|
|
|
|
|
|
if err != nil {
|
2017-03-28 16:12:31 +02:00
|
|
|
log.Printf("Error metadata: %s", err.Error())
|
|
|
|
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-03-22 22:27:26 +01:00
|
|
|
reader, contentType, contentLength, err := s.storage.Get(token, filename)
|
2017-03-28 16:12:31 +02:00
|
|
|
if s.storage.IsNotExist(err) {
|
|
|
|
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
} else if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Could not retrieve file.", 500)
|
|
|
|
return
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
|
|
|
|
2014-10-20 14:54:42 +02:00
|
|
|
defer reader.Close()
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2018-08-13 08:56:08 +02:00
|
|
|
var disposition string
|
|
|
|
|
|
|
|
if action == "inline" {
|
|
|
|
disposition = "inline"
|
|
|
|
} else {
|
|
|
|
disposition = "attachment"
|
|
|
|
}
|
|
|
|
|
2019-06-17 02:43:22 +02:00
|
|
|
remainingDownloads, remainingDays := calcRemainingLimits(metadata)
|
|
|
|
|
2014-10-16 20:01:43 +02:00
|
|
|
w.Header().Set("Content-Type", contentType)
|
2014-10-20 14:54:42 +02:00
|
|
|
w.Header().Set("Content-Length", strconv.FormatUint(contentLength, 10))
|
2018-08-13 08:56:08 +02:00
|
|
|
w.Header().Set("Content-Disposition", fmt.Sprintf("%s; filename=\"%s\"", disposition, filename))
|
|
|
|
w.Header().Set("Connection", "keep-alive")
|
2019-06-17 02:43:22 +02:00
|
|
|
w.Header().Set("X-Remaining-Downloads", strconv.Itoa(remainingDownloads))
|
|
|
|
w.Header().Set("X-Remaining-Days", strconv.Itoa(remainingDays))
|
2014-10-16 20:01:43 +02:00
|
|
|
|
2019-04-28 21:36:45 +02:00
|
|
|
if w.Header().Get("Range") == "" {
|
|
|
|
if _, err = io.Copy(w, reader); err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Error occurred copying to output stream", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-01-16 19:58:11 +01:00
|
|
|
file, err := ioutil.TempFile(s.tempPath, "range-")
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Error occurred copying to output stream", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
defer cleanTmpFile(file)
|
|
|
|
|
|
|
|
tee := io.TeeReader(reader, file)
|
2019-04-28 21:36:45 +02:00
|
|
|
for {
|
|
|
|
b := make([]byte, _5M)
|
|
|
|
_, err = tee.Read(b)
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("%s", err.Error())
|
|
|
|
http.Error(w, "Error occurred copying to output stream", 500)
|
|
|
|
return
|
|
|
|
}
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
2019-01-16 19:58:11 +01:00
|
|
|
|
|
|
|
http.ServeContent(w, r, filename, time.Now(), file)
|
2014-10-16 20:01:43 +02:00
|
|
|
}
|
|
|
|
|
2017-03-22 18:09:21 +01:00
|
|
|
func (s *Server) RedirectHandler(h http.Handler) http.HandlerFunc {
|
2014-10-16 20:01:43 +02:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
2017-03-22 18:09:21 +01:00
|
|
|
if !s.forceHTTPs {
|
|
|
|
// we don't want to enforce https
|
|
|
|
} else if r.URL.Path == "/health.html" {
|
|
|
|
// health check url won't redirect
|
|
|
|
} else if strings.HasSuffix(ipAddrFromRemoteAddr(r.Host), ".onion") {
|
|
|
|
// .onion addresses cannot get a valid certificate, so don't redirect
|
|
|
|
} else if r.Header.Get("X-Forwarded-Proto") == "https" {
|
|
|
|
} else if r.URL.Scheme == "https" {
|
|
|
|
} else {
|
2017-03-22 23:39:59 +01:00
|
|
|
u := getURL(r)
|
2017-03-22 18:09:21 +01:00
|
|
|
u.Scheme = "https"
|
|
|
|
|
|
|
|
http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
|
2014-10-16 20:01:43 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// LoveHandler stamps every response with a few DutchCoders branding
// headers before delegating to the wrapped handler.
func LoveHandler(h http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		headers := w.Header()
		headers.Set("x-made-with", "<3 by DutchCoders")
		headers.Set("x-served-by", "Proudly served by DutchCoders")
		headers.Set("Server", "Transfer.sh HTTP Server 1.0")
		h.ServeHTTP(w, r)
	}
}
|
2018-06-23 18:46:28 +02:00
|
|
|
|
2019-05-11 14:42:59 +02:00
|
|
|
func IPFilterHandler(h http.Handler, ipFilterOptions *IPFilterOptions) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
if ipFilterOptions == nil {
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
} else {
|
|
|
|
WrapIPFilter(h, *ipFilterOptions).ServeHTTP(w, r)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-23 18:46:28 +02:00
|
|
|
func (s *Server) BasicAuthHandler(h http.Handler) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
if s.AuthUser == "" || s.AuthPass == "" {
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("WWW-Authenticate", "Basic realm=\"Restricted\"")
|
|
|
|
|
|
|
|
username, password, authOK := r.BasicAuth()
|
|
|
|
if authOK == false {
|
|
|
|
http.Error(w, "Not authorized", 401)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if username != s.AuthUser || password != s.AuthPass {
|
|
|
|
http.Error(w, "Not authorized", 401)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
}
|
|
|
|
}
|