Merge pull request #77 from fossyy/staging

Staging
This commit is contained in:
2024-09-20 16:44:37 +07:00
committed by GitHub
13 changed files with 216 additions and 165 deletions

View File

@ -15,16 +15,18 @@ type App struct {
Database types.Database
Cache types.CachingServer
Service types.Services
Storage types.Storage
Logger *logger.AggregatedLogger
Mail *email.SmtpServer
}
func NewClientServer(addr string, handler http.Handler, logger logger.AggregatedLogger, database types.Database, cache types.CachingServer, service types.Services, mail email.SmtpServer) App {
func NewClientServer(addr string, handler http.Handler, logger logger.AggregatedLogger, database types.Database, cache types.CachingServer, storage types.Storage, service types.Services, mail email.SmtpServer) App {
return App{
Server: http.Server{
Addr: addr,
Handler: handler,
},
Storage: storage,
Logger: &logger,
Database: database,
Cache: cache,

View File

@ -1,11 +1,10 @@
package deleteHandler
import (
"fmt"
"github.com/fossyy/filekeeper/app"
"github.com/fossyy/filekeeper/types"
"net/http"
"os"
"path/filepath"
)
func DELETE(w http.ResponseWriter, r *http.Request) {
@ -30,11 +29,7 @@ func DELETE(w http.ResponseWriter, r *http.Request) {
return
}
uploadDir := "uploads"
currentDir, _ := os.Getwd()
basePath := filepath.Join(currentDir, uploadDir)
fileFolder := filepath.Join(basePath, file.OwnerID.String(), file.ID.String())
err = os.RemoveAll(fileFolder)
err = app.Server.Storage.Delete(r.Context(), fmt.Sprintf("%s/%s", file.OwnerID.String(), file.ID.String()))
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return

View File

@ -1,14 +1,12 @@
package downloadHandler
import (
"context"
"fmt"
"github.com/fossyy/filekeeper/app"
"github.com/fossyy/filekeeper/session"
"github.com/fossyy/filekeeper/types/models"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
)
@ -33,16 +31,16 @@ func GET(w http.ResponseWriter, r *http.Request) {
}
}
uploadDir := "uploads"
currentDir, _ := os.Getwd()
basePath := filepath.Join(currentDir, uploadDir)
saveFolder := filepath.Join(basePath, file.OwnerID.String(), file.ID.String())
if filepath.Dir(saveFolder) != filepath.Join(basePath, file.OwnerID.String()) {
http.Error(w, "Invalid Path", http.StatusInternalServerError)
app.Server.Logger.Error("invalid path")
return
}
//uploadDir := "uploads"
//currentDir, _ := os.Getwd()
//basePath := filepath.Join(currentDir, uploadDir)
//saveFolder := filepath.Join(basePath, file.OwnerID.String(), file.ID.String())
//
//if filepath.Dir(saveFolder) != filepath.Join(basePath, file.OwnerID.String()) {
// http.Error(w, "Invalid Path", http.StatusInternalServerError)
// app.Server.Logger.Error("invalid path")
// return
//}
rangeHeader := r.Header.Get("Range")
if rangeHeader != "" {
@ -69,7 +67,7 @@ func GET(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, file.Size))
w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
w.WriteHeader(http.StatusPartialContent)
sendFileChunk(w, saveFolder, file, start, end)
sendFileChunk(w, file, start, end)
return
}
}
@ -79,11 +77,11 @@ func GET(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Content-Length", fmt.Sprintf("%d", file.Size))
sendFileChunk(w, saveFolder, file, 0, int64(file.Size-1))
sendFileChunk(w, file, 0, int64(file.Size-1))
return
}
func sendFileChunk(w http.ResponseWriter, saveFolder string, file *models.File, start, end int64) {
func sendFileChunk(w http.ResponseWriter, file *models.File, start, end int64) {
chunkSize := int64(2 * 1024 * 1024)
startChunk := start / chunkSize
@ -93,64 +91,39 @@ func sendFileChunk(w http.ResponseWriter, saveFolder string, file *models.File,
endOffset := end % chunkSize
for i := startChunk; i <= endChunk; i++ {
chunkPath := filepath.Join(saveFolder, fmt.Sprintf("chunk_%d", i))
chunkFile, err := os.Open(chunkPath)
chunkKey := fmt.Sprintf("%s/%s/chunk_%d", file.OwnerID.String(), file.ID.String(), i)
chunkData, err := app.Server.Storage.Get(context.TODO(), chunkKey)
if err != nil {
http.Error(w, fmt.Sprintf("Error opening chunk: %v", err), http.StatusInternalServerError)
http.Error(w, fmt.Sprintf("Error retrieving chunk: %v", err), http.StatusInternalServerError)
app.Server.Logger.Error(err.Error())
return
}
defer chunkFile.Close()
var chunkStart, chunkEnd int64
if i == startChunk {
chunkStart = startOffset
var dataToSend []byte
if i == startChunk && i == endChunk {
dataToSend = chunkData[startOffset : endOffset+1]
} else if i == startChunk {
dataToSend = chunkData[startOffset:]
} else if i == endChunk {
dataToSend = chunkData[:endOffset+1]
} else {
chunkStart = 0
}
if i == endChunk {
chunkEnd = endOffset
} else {
chunkEnd = chunkSize - 1
dataToSend = chunkData
}
_, err = chunkFile.Seek(chunkStart, io.SeekStart)
if err != nil {
http.Error(w, fmt.Sprintf("Error seeking chunk: %v", err), http.StatusInternalServerError)
app.Server.Logger.Error(err.Error())
return
}
buffer := make([]byte, 2048)
toSend := chunkEnd - chunkStart + 1
for toSend > 0 {
n, err := chunkFile.Read(buffer)
if err != nil && err != io.EOF {
http.Error(w, fmt.Sprintf("Error reading chunk: %v", err), http.StatusInternalServerError)
app.Server.Logger.Error(err.Error())
return
}
if n == 0 {
break
}
if int64(n) > toSend {
n = int(toSend)
}
_, err = w.Write(buffer[:n])
_, err = w.Write(dataToSend)
if err != nil {
http.Error(w, fmt.Sprintf("Error writing chunk: %v", err), http.StatusInternalServerError)
app.Server.Logger.Error(err.Error())
return
}
toSend -= int64(n)
if i == int64(file.TotalChunk)-1 && toSend == 0 {
if i == int64(file.TotalChunk)-1 {
err := app.Server.Database.IncrementDownloadCount(file.ID.String())
if err != nil {
http.Error(w, fmt.Sprintf("Error writing chunk: %v", err), http.StatusInternalServerError)
http.Error(w, fmt.Sprintf("Error updating download count: %v", err), http.StatusInternalServerError)
app.Server.Logger.Error(err.Error())
return
}
}
}
}
}

View File

@ -8,7 +8,6 @@ import (
"github.com/fossyy/filekeeper/utils"
fileView "github.com/fossyy/filekeeper/view/client/file"
"net/http"
"path/filepath"
"strconv"
)
@ -23,12 +22,16 @@ func GET(w http.ResponseWriter, r *http.Request) {
var filesData []types.FileData
for _, file := range files {
saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
chunkFiles, err := filepath.Glob(pattern)
existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
if err != nil {
app.Server.Logger.Error(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
missingChunk := len(existingChunks) != int(file.TotalChunk)
filesData = append(filesData, types.FileData{
ID: file.ID.String(),

View File

@ -7,7 +7,6 @@ import (
"github.com/fossyy/filekeeper/utils"
fileView "github.com/fossyy/filekeeper/view/client/file"
"net/http"
"path/filepath"
"strconv"
)
@ -16,6 +15,7 @@ func GET(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query().Get("q")
status := r.URL.Query().Get("status")
var fileStatus types.FileStatus
if status == "private" {
fileStatus = types.Private
} else if status == "public" {
@ -23,6 +23,7 @@ func GET(w http.ResponseWriter, r *http.Request) {
} else {
fileStatus = types.All
}
files, err := app.Server.Database.GetFiles(userSession.UserID.String(), query, fileStatus)
if err != nil {
app.Server.Logger.Error(err.Error())
@ -33,12 +34,16 @@ func GET(w http.ResponseWriter, r *http.Request) {
var filesData []types.FileData
for _, file := range files {
saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
chunkFiles, err := filepath.Glob(pattern)
existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
if err != nil {
app.Server.Logger.Error(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
missingChunk := len(existingChunks) != int(file.TotalChunk)
filesData = append(filesData, types.FileData{
ID: file.ID.String(),
@ -62,5 +67,4 @@ func GET(w http.ResponseWriter, r *http.Request) {
}
w.WriteHeader(http.StatusForbidden)
return
}

View File

@ -7,7 +7,6 @@ import (
"github.com/fossyy/filekeeper/utils"
fileView "github.com/fossyy/filekeeper/view/client/file"
"net/http"
"path/filepath"
"strconv"
)
@ -38,11 +37,16 @@ func PATCH(w http.ResponseWriter, r *http.Request) {
return
}
saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
chunkFiles, err := filepath.Glob(pattern)
prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
if err != nil {
app.Server.Logger.Error(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
missingChunk := len(existingChunks) != int(file.TotalChunk)
fileData := types.FileData{
ID: newFile.ID.String(),

View File

@ -3,13 +3,9 @@ package uploadHandler
import (
"fmt"
"github.com/fossyy/filekeeper/app"
"github.com/fossyy/filekeeper/types"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
)
func POST(w http.ResponseWriter, r *http.Request) {
@ -19,17 +15,6 @@ func POST(w http.ResponseWriter, r *http.Request) {
return
}
userSession := r.Context().Value("user").(types.User)
uploadDir := "uploads"
if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
if err := os.Mkdir(uploadDir, os.ModePerm); err != nil {
app.Server.Logger.Error("error getting upload info: " + err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
}
file, err := app.Server.Service.GetFile(fileID)
if err != nil {
app.Server.Logger.Error("error getting upload info: " + err.Error())
@ -43,34 +28,6 @@ func POST(w http.ResponseWriter, r *http.Request) {
return
}
currentDir, err := os.Getwd()
if err != nil {
app.Server.Logger.Error("unable to get current directory")
w.WriteHeader(http.StatusInternalServerError)
return
}
basePath := filepath.Join(currentDir, uploadDir)
cleanBasePath := filepath.Clean(basePath)
saveFolder := filepath.Join(cleanBasePath, userSession.UserID.String(), file.ID.String())
cleanSaveFolder := filepath.Clean(saveFolder)
if !strings.HasPrefix(cleanSaveFolder, cleanBasePath) {
app.Server.Logger.Error("invalid path")
w.WriteHeader(http.StatusInternalServerError)
return
}
if _, err := os.Stat(saveFolder); os.IsNotExist(err) {
if err := os.MkdirAll(saveFolder, os.ModePerm); err != nil {
app.Server.Logger.Error("error creating save folder: " + err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
}
fileByte, _, err := r.FormFile("chunk")
if err != nil {
app.Server.Logger.Error("error getting upload info: " + err.Error())
@ -79,15 +36,15 @@ func POST(w http.ResponseWriter, r *http.Request) {
}
defer fileByte.Close()
dst, err := os.OpenFile(filepath.Join(saveFolder, fmt.Sprintf("chunk_%d", index)), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
buffer, err := io.ReadAll(fileByte)
if err != nil {
app.Server.Logger.Error("error making upload folder: " + err.Error())
app.Server.Logger.Error("error copying byte to file dst: " + err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
defer dst.Close()
if _, err := io.Copy(dst, fileByte); err != nil {
err = app.Server.Storage.Add(r.Context(), fmt.Sprintf("%s/%s/chunk_%d", file.OwnerID.String(), file.ID.String(), index), buffer)
if err != nil {
app.Server.Logger.Error("error copying byte to file dst: " + err.Error())
w.WriteHeader(http.StatusInternalServerError)
return

View File

@ -7,7 +7,6 @@ import (
"github.com/fossyy/filekeeper/utils"
fileView "github.com/fossyy/filekeeper/view/client/file"
"net/http"
"path/filepath"
"strconv"
)
@ -32,11 +31,17 @@ func PUT(w http.ResponseWriter, r *http.Request) {
app.Server.Logger.Error(err.Error())
return
}
saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
chunkFiles, err := filepath.Glob(pattern)
missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
if err != nil {
app.Server.Logger.Error(err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
missingChunk := len(existingChunks) != int(file.TotalChunk)
fileData := types.FileData{
ID: file.ID.String(),
Name: file.Name,

View File

@ -1,6 +1,7 @@
package userHandler
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -15,7 +16,6 @@ import (
"github.com/gorilla/websocket"
"gorm.io/gorm"
"net/http"
"path/filepath"
"strings"
)
@ -196,34 +196,27 @@ func handlerWS(conn *websocket.Conn, userSession types.User) {
Name: newFile.Name,
Size: newFile.Size,
Downloaded: newFile.Downloaded,
Done: false,
}
fileData.Chunk = make(map[string]bool)
saveFolder := filepath.Join("uploads", userSession.UserID.String(), newFile.ID.String())
prefix := fmt.Sprintf("%s/%s/chunk_", userSession.UserID.String(), newFile.ID.String())
pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
chunkFiles, err := filepath.Glob(pattern)
existingChunks, err := app.Server.Storage.ListObjects(context.TODO(), prefix)
if err != nil {
app.Server.Logger.Error(err.Error())
fileData.Done = false
sendErrorResponse(conn, action.Action, "Unknown error")
continue
} else {
for i := 0; i <= int(newFile.TotalChunk); i++ {
for i := 0; i < int(newFile.TotalChunk); i++ {
fileData.Chunk[fmt.Sprintf("chunk_%d", i)] = false
}
for _, chunkFile := range chunkFiles {
for _, chunkFile := range existingChunks {
var chunkIndex int
fmt.Sscanf(filepath.Base(chunkFile), "chunk_%d", &chunkIndex)
fmt.Sscanf(chunkFile, "chunk_%d", &chunkIndex)
fileData.Chunk[fmt.Sprintf("chunk_%d", chunkIndex)] = true
}
for i := 0; i <= int(newFile.TotalChunk); i++ {
if !fileData.Chunk[fmt.Sprintf("chunk_%d", i)] {
fileData.Done = false
break
}
}
}
sendSuccessResponseWithID(conn, action.Action, fileData, uploadNewFile.RequestID)
continue
@ -246,10 +239,8 @@ func handlerWS(conn *websocket.Conn, userSession types.User) {
Done: true,
}
saveFolder := filepath.Join("uploads", userSession.UserID.String(), fileData.ID.String())
pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
chunkFiles, err := filepath.Glob(pattern)
prefix := fmt.Sprintf("%s/%s/chunk_", userSession.UserID.String(), file.ID.String())
existingChunks, err := app.Server.Storage.ListObjects(context.TODO(), prefix)
if err != nil {
app.Server.Logger.Error(err.Error())
fileData.Done = false
@ -257,9 +248,9 @@ func handlerWS(conn *websocket.Conn, userSession types.User) {
for i := 0; i < int(file.TotalChunk); i++ {
fileData.Chunk[fmt.Sprintf("chunk_%d", i)] = false
}
for _, chunkFile := range chunkFiles {
for _, chunkFile := range existingChunks {
var chunkIndex int
fmt.Sscanf(filepath.Base(chunkFile), "chunk_%d", &chunkIndex)
fmt.Sscanf(chunkFile, "chunk_%d", &chunkIndex)
fileData.Chunk[fmt.Sprintf("chunk_%d", chunkIndex)] = true
}

10
main.go
View File

@ -2,6 +2,7 @@ package main
import (
"fmt"
"github.com/fossyy/filekeeper/storage"
"strconv"
"github.com/fossyy/filekeeper/app"
@ -36,7 +37,14 @@ func main() {
smtpPort, _ := strconv.Atoi(utils.Getenv("SMTP_PORT"))
mailServer := email.NewSmtpServer(utils.Getenv("SMTP_HOST"), smtpPort, utils.Getenv("SMTP_USER"), utils.Getenv("SMTP_PASSWORD"))
app.Server = app.NewClientServer(clientAddr, middleware.Handler(client.SetupRoutes()), *logger.Logger(), database, cacheServer, services, mailServer)
bucket := utils.Getenv("S3_BUCKET_NAME")
region := utils.Getenv("S3_REGION")
endpoint := utils.Getenv("S3_ENDPOINT")
accessKey := utils.Getenv("S3_ACCESS_KEY")
secretKey := utils.Getenv("S3_SECRET_KEY")
S3 := storage.NewS3(bucket, region, endpoint, accessKey, secretKey)
app.Server = app.NewClientServer(clientAddr, middleware.Handler(client.SetupRoutes()), *logger.Logger(), database, cacheServer, S3, services, mailServer)
app.Admin = app.NewAdminServer(adminAddr, middleware.Handler(admin.SetupRoutes()), database)
go func() {

View File

@ -1,6 +1,8 @@
package client
import (
"fmt"
"github.com/fossyy/filekeeper/app"
googleOauthHandler "github.com/fossyy/filekeeper/handler/auth/google"
googleOauthCallbackHandler "github.com/fossyy/filekeeper/handler/auth/google/callback"
googleOauthSetupHandler "github.com/fossyy/filekeeper/handler/auth/google/setup"
@ -157,6 +159,25 @@ func SetupRoutes() *http.ServeMux {
http.ServeFile(w, r, "public/favicon.ico")
})
handler.HandleFunc("GET /test", func(w http.ResponseWriter, r *http.Request) {
objects, err := app.Server.Storage.ListObjects(r.Context(), "test/")
fmt.Println(objects)
if err != nil {
return
}
if r.URL.Query().Get("new") != "" {
app.Server.Storage.Add(r.Context(), "test.txt", []byte(r.URL.Query().Get("new")))
w.Write([]byte("succes"))
return
}
get, err := app.Server.Storage.Get(r.Context(), "test.txt")
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
w.Write(get)
})
fileServer := http.FileServer(http.Dir("./public"))
handler.Handle("/public/", http.StripPrefix("/public", fileServer))

80
storage/storage.go Normal file
View File

@ -0,0 +1,80 @@
package storage
import (
"bytes"
"context"
"fmt"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"io"
"path/filepath"
)
// S3 is a blob-storage backend built on top of a MinIO client. All objects
// are read from and written to the single configured Bucket.
type S3 struct {
Client *minio.Client
Bucket string
}
// NewS3 constructs an S3-backed storage client for the given bucket using
// static V4 credentials. The connection is always made over TLS (Secure: true).
//
// NOTE(review): when the minio client cannot be created this returns nil and
// the underlying error is discarded; any caller that does not nil-check will
// panic on first use. Consider changing the signature to (*S3, error).
func NewS3(bucket string, region string, endpoint string, accessKey string, secretKey string) *S3 {
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKey, secretKey, ""),
Secure: true,
Region: region,
})
if err != nil {
// Error is swallowed here; see the NOTE above.
return nil
}
return &S3{Client: minioClient, Bucket: bucket}
}
// Get fetches the object stored under key and returns its complete contents.
func (storage *S3) Get(ctx context.Context, key string) ([]byte, error) {
	obj, err := storage.Client.GetObject(ctx, storage.Bucket, key, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	defer obj.Close()
	// io.ReadAll already yields (data, err) in the shape we need.
	return io.ReadAll(obj)
}
// Add uploads data to the bucket under key as a generic binary object.
func (storage *S3) Add(ctx context.Context, key string, data []byte) error {
	opts := minio.PutObjectOptions{
		ContentType: "application/octet-stream",
	}
	_, err := storage.Client.PutObject(ctx, storage.Bucket, key, bytes.NewReader(data), int64(len(data)), opts)
	return err
}
// Delete removes the object stored under key from the bucket.
func (storage *S3) Delete(ctx context.Context, key string) error {
	return storage.Client.RemoveObject(ctx, storage.Bucket, key, minio.RemoveObjectOptions{})
}
// ListObjects enumerates every object in the bucket whose key starts with
// prefix and returns their base names (the final path segment, e.g.
// "chunk_3"). It returns an error as soon as the listing channel reports one.
func (storage *S3) ListObjects(ctx context.Context, prefix string) ([]string, error) {
var objects []string
objectCh := storage.Client.ListObjects(ctx, storage.Bucket, minio.ListObjectsOptions{
Prefix: prefix,
})
for object := range objectCh {
if object.Err != nil {
return nil, fmt.Errorf("failed to list objects: %w", object.Err)
}
// NOTE(review): object keys are slash-separated regardless of host OS,
// so path.Base is the safer choice here — filepath.Base splits on the
// OS separator and behaves differently on Windows. Confirm and switch.
fileName := filepath.Base(object.Key)
objects = append(objects, fileName)
}
return objects, nil
}

View File

@ -2,9 +2,10 @@ package types
import (
"context"
"time"
"github.com/fossyy/filekeeper/types/models"
"github.com/google/uuid"
"time"
)
type FileStatus string
@ -92,3 +93,10 @@ type Services interface {
GetUserFile(name, ownerID string) (*FileWithDetail, error)
GetUserStorageUsage(ownerID string) (uint64, error)
}
// Storage abstracts a key/value blob store so handlers can read, write,
// delete, and enumerate uploaded file chunks without depending on a
// concrete backend (storage.S3 is the implementation wired up in main).
type Storage interface {
// Get returns the full contents of the object stored under key.
Get(ctx context.Context, key string) ([]byte, error)
// Add stores data under key, overwriting any existing object.
Add(ctx context.Context, key string, data []byte) error
// Delete removes the object stored under key.
Delete(ctx context.Context, key string) error
// ListObjects returns the base names of all objects whose key starts
// with prefix.
ListObjects(ctx context.Context, prefix string) ([]string, error)
}