.github/workflows: adding golangci-lint as new job (#453)

In order to follow Go best practices, we should lint the code base properly, catching more than the usual syntax mistakes.
Stefan Benten, 2021-12-26 14:03:27 +01:00 (committed by GitHub)
parent 5932a194b2
commit 2fbd19365c
9 changed files with 98 additions and 53 deletions
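As context for the changes below: golangci-lint aggregates linters such as errcheck and the staticcheck/gosimple family, which report issues go vet does not, for example silently dropped error returns and patterns like t.Sub(time.Now()). A minimal sketch of the kind of finding involved (hypothetical snippet, not taken from the repository):

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// errcheck flags a bare os.Remove("stale.tmp") because its error would be
	// dropped; an explicit discard (or a real check) documents the intent.
	_ = os.Remove("stale.tmp") // "stale.tmp" is a made-up file name

	// The staticcheck/gosimple family suggests time.Until over
	// deadline.Sub(time.Now()), the same rewrite applied to metadata.MaxDate below.
	deadline := time.Now().Add(24 * time.Hour)
	fmt.Println(time.Until(deadline).Hours())
}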


@@ -29,3 +29,17 @@ jobs:
         go version
         go vet ./...
         go test ./...
+  golangci:
+    name: Linting
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v1
+        with:
+          go-version: 1.17
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: latest
+          skip-go-installation: true
+          args: "--config .golangci.yml"

.golangci.yml (new file, 20 lines added)

@@ -0,0 +1,20 @@
+run:
+  deadline: 10m
+  issues-exit-code: 1
+  tests: true
+
+output:
+  format: colored-line-number
+  print-issued-lines: true
+  print-linter-name: true
+
+linters:
+  disable:
+    - deadcode
+    - unused
+
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+  new: false
+  exclude-use-default: false

main.go (12 changed lines)

@@ -1,8 +1,16 @@
 package main
 
-import "github.com/dutchcoders/transfer.sh/cmd"
+import (
+	"log"
+	"os"
+
+	"github.com/dutchcoders/transfer.sh/cmd"
+)
 
 func main() {
 	app := cmd.New()
-	app.RunAndExitOnError()
+	err := app.Run(os.Args)
+	if err != nil {
+		log.Fatal(err)
+	}
 }


@@ -31,7 +31,6 @@ import (
 	// _ "transfer.sh/app/utils"
 
 	"fmt"
-	"io"
 	"net/http"
 	"time"
@@ -50,9 +49,7 @@ func (s *Server) scanHandler(w http.ResponseWriter, r *http.Request) {
 	s.logger.Printf("Scanning %s %d %s", filename, contentLength, contentType)
 
-	var reader io.Reader
-
-	reader = r.Body
+	reader := r.Body
 
 	c := clamd.NewClamd(s.ClamAVDaemonHost)
@@ -67,7 +64,7 @@ func (s *Server) scanHandler(w http.ResponseWriter, r *http.Request) {
 	select {
 	case s := <-response:
-		w.Write([]byte(fmt.Sprintf("%v\n", s.Status)))
+		_, _ = w.Write([]byte(fmt.Sprintf("%v\n", s.Status)))
 	case <-time.After(time.Second * 60):
 		abort <- true
 	}


@@ -462,14 +462,13 @@ func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {
 	reader = r.Body
-	defer r.Body.Close()
+	defer CloseCheck(r.Body.Close)
 
 	if contentLength == -1 {
 		// queue file to disk, because s3 needs content length
 		var err error
-		var f io.Reader
-		f = reader
+		f := reader
 
 		var b bytes.Buffer
@@ -575,13 +574,9 @@ func resolveURL(r *http.Request, u *url.URL, proxyPort string) string {
 }
 
 func resolveKey(key, proxyPath string) string {
-	if strings.HasPrefix(key, "/") {
-		key = key[1:]
-	}
+	key = strings.TrimPrefix(key, "/")
 
-	if strings.HasPrefix(key, proxyPath) {
-		key = key[len(proxyPath):]
-	}
+	key = strings.TrimPrefix(key, proxyPath)
 
 	key = strings.Replace(key, "\\", "/", -1)
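The rewrite above leans on strings.TrimPrefix being a no-op when the prefix is absent, which makes it equivalent to the removed HasPrefix-and-slice logic. A quick standalone sketch (hypothetical key values):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// TrimPrefix strips the prefix only when it is present, matching the old
	// guarded HasPrefix-and-slice logic in resolveKey.
	fmt.Println(strings.TrimPrefix("/token/file.txt", "/")) // token/file.txt
	fmt.Println(strings.TrimPrefix("token/file.txt", "/"))  // unchanged
}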
@@ -660,7 +655,7 @@ func (metadata metadata) remainingLimitHeaderValues() (remainingDownloads, remai
 	if metadata.MaxDate.IsZero() {
 		remainingDays = "n/a"
 	} else {
-		timeDifference := metadata.MaxDate.Sub(time.Now())
+		timeDifference := time.Until(metadata.MaxDate)
 		remainingDays = strconv.Itoa(int(timeDifference.Hours()/24) + 1)
 	}
@@ -679,8 +674,6 @@ func (s *Server) lock(token, filename string) {
 	lock, _ := s.locks.LoadOrStore(key, &sync.Mutex{})
 
 	lock.(*sync.Mutex).Lock()
-
-	return
 }
 
 func (s *Server) unlock(token, filename string) {
@@ -702,7 +695,7 @@ func (s *Server) checkMetadata(token, filename string, increaseDownload bool) (m
 		return metadata, err
 	}
-	defer r.Close()
+	defer CloseCheck(r.Close)
 
 	if err := json.NewDecoder(r).Decode(&metadata); err != nil {
 		return metadata, err
@@ -740,7 +733,7 @@ func (s *Server) checkDeletionToken(deletionToken, token, filename string) error
 		return err
 	}
-	defer r.Close()
+	defer CloseCheck(r.Close)
 
 	if err := json.NewDecoder(r).Decode(&metadata); err != nil {
 		return err
@@ -755,9 +748,9 @@ func (s *Server) purgeHandler() {
 	ticker := time.NewTicker(s.purgeInterval)
 	go func() {
 		for {
-			select {
-			case <-ticker.C:
-				err := s.storage.Purge(s.purgeDays)
+			<-ticker.C
+			err := s.storage.Purge(s.purgeDays)
+			if err != nil {
 				s.logger.Printf("error cleaning up expired files: %v", err)
 			}
 		}
@@ -825,7 +818,7 @@ func (s *Server) zipHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
-		defer reader.Close()
+		defer CloseCheck(reader.Close)
 
 		header := &zip.FileHeader{
 			Name: strings.Split(key, "/")[1],
@@ -868,10 +861,10 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Connection", "close")
 
 	os := gzip.NewWriter(w)
-	defer os.Close()
+	defer CloseCheck(os.Close)
 
 	zw := tar.NewWriter(os)
-	defer zw.Close()
+	defer CloseCheck(zw.Close)
 
 	for _, key := range strings.Split(files, ",") {
 		key = resolveKey(key, s.proxyPath)
@@ -896,7 +889,7 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
-		defer reader.Close()
+		defer CloseCheck(reader.Close)
 
 		header := &tar.Header{
 			Name: strings.Split(key, "/")[1],
@@ -930,7 +923,7 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Connection", "close")
 
 	zw := tar.NewWriter(w)
-	defer zw.Close()
+	defer CloseCheck(zw.Close)
 
 	for _, key := range strings.Split(files, ",") {
 		key = resolveKey(key, s.proxyPath)
@@ -955,7 +948,7 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
-		defer reader.Close()
+		defer CloseCheck(reader.Close)
 
 		header := &tar.Header{
 			Name: strings.Split(key, "/")[1],
@@ -1037,7 +1030,7 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
-	defer reader.Close()
+	defer CloseCheck(reader.Close)
 
 	var disposition string
@@ -1086,8 +1079,6 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Error occurred copying to output stream", 500)
 		return
 	}
-
-	return
 }
 
 // RedirectHandler handles redirect
@@ -1142,7 +1133,6 @@ func ipFilterHandler(h http.Handler, ipFilterOptions *IPFilterOptions) http.Hand
 		} else {
 			WrapIPFilter(h, *ipFilterOptions).ServeHTTP(w, r)
 		}
-		return
 	}
 }
@@ -1156,7 +1146,7 @@ func (s *Server) basicAuthHandler(h http.Handler) http.HandlerFunc {
 		w.Header().Set("WWW-Authenticate", "Basic realm=\"Restricted\"")
 
 		username, password, authOK := r.BasicAuth()
-		if authOK == false {
+		if !authOK {
 			http.Error(w, "Not authorized", 401)
 			return
 		}


@@ -393,7 +393,7 @@ func (s *Server) Run() {
 		go func() {
 			s.logger.Println("Profiled listening at: :6060")
 
-			http.ListenAndServe(":6060", nil)
+			_ = http.ListenAndServe(":6060", nil)
 		}()
 	}
@@ -424,8 +424,14 @@ func (s *Server) Run() {
 			s.logger.Panicf("Unable to parse: path=%s, err=%s", path, err)
 		}
 
-		htmlTemplates.New(stripPrefix(path)).Parse(string(bytes))
-		textTemplates.New(stripPrefix(path)).Parse(string(bytes))
+		_, err = htmlTemplates.New(stripPrefix(path)).Parse(string(bytes))
+		if err != nil {
+			s.logger.Panicln("Unable to parse template")
+		}
+		_, err = textTemplates.New(stripPrefix(path)).Parse(string(bytes))
+		if err != nil {
+			s.logger.Panicln("Unable to parse template")
+		}
 	}
 }
@@ -493,7 +499,7 @@ func (s *Server) Run() {
 	r.NotFoundHandler = http.HandlerFunc(s.notFoundHandler)
 
-	mime.AddExtensionType(".md", "text/x-markdown")
+	_ = mime.AddExtensionType(".md", "text/x-markdown")
 
 	s.logger.Printf("Transfer.sh server started.\nusing temp folder: %s\nusing storage provider: %s", s.tempPath, s.storage.Type())
@@ -532,7 +538,7 @@ func (s *Server) Run() {
 	s.logger.Printf("listening on port: %v\n", s.ListenerString)
 
 	go func() {
-		srvr.ListenAndServe()
+		_ = srvr.ListenAndServe()
 	}()
 }


@@ -99,7 +99,7 @@ func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser,
 // Delete removes a file from storage
 func (s *LocalStorage) Delete(token string, filename string) (err error) {
 	metadata := filepath.Join(s.basedir, token, fmt.Sprintf("%s.metadata", filename))
-	os.Remove(metadata)
+	_ = os.Remove(metadata)
 
 	path := filepath.Join(s.basedir, token, filename)
 	err = os.Remove(path)
@@ -152,7 +152,7 @@ func (s *LocalStorage) Put(token string, filename string, reader io.Reader, cont
 		return err
 	}
-	defer f.Close()
+	defer CloseCheck(f.Close)
 
 	if _, err = io.Copy(f, reader); err != nil {
 		return err
@@ -336,8 +336,9 @@ func NewGDriveStorage(clientJSONFilepath string, localConfigPath string, basedir
 	if err != nil {
 		return nil, err
 	}
 
-	srv, err := drive.New(getGDriveClient(config, localConfigPath, logger))
+	// ToDo: Upgrade deprecated version
+	srv, err := drive.New(getGDriveClient(config, localConfigPath, logger)) // nolint: staticcheck
 	if err != nil {
 		return nil, err
 	}
@@ -493,6 +494,9 @@ func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, conte
 	var fi *drive.File
 	fi, err = s.service.Files.Get(fileID).Fields("size", "md5Checksum").Do()
+	if err != nil {
+		return
+	}
 	if !s.hasChecksum(fi) {
 		err = fmt.Errorf("Cannot find file %s/%s", token, filename)
 		return
@@ -515,7 +519,7 @@
 // Delete removes a file from storage
 func (s *GDrive) Delete(token string, filename string) (err error) {
 	metadata, _ := s.findID(fmt.Sprintf("%s.metadata", filename), token)
-	s.service.Files.Delete(metadata).Do()
+	_ = s.service.Files.Delete(metadata).Do()
 
 	var fileID string
 	fileID, err = s.findID(filename, token)
@@ -644,7 +648,7 @@ func getGDriveTokenFromWeb(config *oauth2.Config, logger *log.Logger) *oauth2.To
 // Retrieves a token from a local file.
 func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
 	f, err := os.Open(file)
-	defer f.Close()
+	defer CloseCheck(f.Close)
 	if err != nil {
 		return nil, err
 	}
@@ -657,12 +661,15 @@ func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
 func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
 	logger.Printf("Saving credential file to: %s\n", path)
 	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
-	defer f.Close()
+	defer CloseCheck(f.Close)
 	if err != nil {
 		logger.Fatalf("Unable to cache oauth token: %v", err)
 	}
 
-	json.NewEncoder(f).Encode(token)
+	err = json.NewEncoder(f).Encode(token)
+	if err != nil {
+		logger.Fatalf("Unable to encode oauth token: %v", err)
+	}
 }
 
 // StorjStorage is a storage backed by Storj


@@ -279,3 +279,9 @@ func formatSize(size int64) string {
 	getSuffix := suffixes[int(math.Floor(base))]
 	return fmt.Sprintf("%s %s", strconv.FormatFloat(newVal, 'f', -1, 64), getSuffix)
 }
+
+func CloseCheck(f func() error) {
+	if err := f(); err != nil {
+		fmt.Println("Received close error:", err)
+	}
+}
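The new CloseCheck helper receives the close function itself (a method value such as f.Close), so the call is still deferred while its error is logged rather than dropped. A small self-contained usage sketch, assuming a hypothetical file; CloseCheck is repeated here only so the example compiles on its own:

package main

import (
	"fmt"
	"io"
	"os"
)

// CloseCheck mirrors the helper added above: it runs the close function and
// reports any error instead of silently ignoring it.
func CloseCheck(f func() error) {
	if err := f(); err != nil {
		fmt.Println("Received close error:", err)
	}
}

func main() {
	f, err := os.Open("example.txt") // made-up path, for illustration only
	if err != nil {
		fmt.Println(err)
		return
	}
	// Passing the method value f.Close defers the call itself; CloseCheck logs
	// any error it returns.
	defer CloseCheck(f.Close)

	if _, err := io.Copy(io.Discard, f); err != nil {
		fmt.Println(err)
	}
}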


@@ -26,7 +26,6 @@ package server
 import (
 	"fmt"
-	"io"
 	"net/http"
 
 	"github.com/gorilla/mux"
@@ -49,9 +48,7 @@ func (s *Server) virusTotalHandler(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, err.Error(), 500)
 	}
 
-	var reader io.Reader
-
-	reader = r.Body
+	reader := r.Body
 
 	result, err := vt.Scan(filename, reader)
 	if err != nil {
@@ -59,5 +56,5 @@
 	}
 
 	s.logger.Println(result)
-	w.Write([]byte(fmt.Sprintf("%v\n", result.Permalink)))
+	_, _ = w.Write([]byte(fmt.Sprintf("%v\n", result.Permalink)))
 }