Merge pull request #77 from fossyy/staging

Staging

Committed by GitHub on 2024-09-20 16:44:37 +07:00
13 changed files with 216 additions and 165 deletions

@@ -15,16 +15,18 @@ type App struct {
 	Database types.Database
 	Cache    types.CachingServer
 	Service  types.Services
+	Storage  types.Storage
 	Logger   *logger.AggregatedLogger
 	Mail     *email.SmtpServer
 }

-func NewClientServer(addr string, handler http.Handler, logger logger.AggregatedLogger, database types.Database, cache types.CachingServer, service types.Services, mail email.SmtpServer) App {
+func NewClientServer(addr string, handler http.Handler, logger logger.AggregatedLogger, database types.Database, cache types.CachingServer, storage types.Storage, service types.Services, mail email.SmtpServer) App {
 	return App{
 		Server: http.Server{
 			Addr:    addr,
 			Handler: handler,
 		},
+		Storage:  storage,
 		Logger:   &logger,
 		Database: database,
 		Cache:    cache,

@@ -1,11 +1,10 @@
 package deleteHandler

 import (
+	"fmt"
 	"github.com/fossyy/filekeeper/app"
 	"github.com/fossyy/filekeeper/types"
 	"net/http"
-	"os"
-	"path/filepath"
 )

 func DELETE(w http.ResponseWriter, r *http.Request) {

@@ -30,11 +29,7 @@ func DELETE(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	uploadDir := "uploads"
-	currentDir, _ := os.Getwd()
-	basePath := filepath.Join(currentDir, uploadDir)
-	fileFolder := filepath.Join(basePath, file.OwnerID.String(), file.ID.String())
-	err = os.RemoveAll(fileFolder)
+	err = app.Server.Storage.Delete(r.Context(), fmt.Sprintf("%s/%s", file.OwnerID.String(), file.ID.String()))
 	if err != nil {
 		w.WriteHeader(http.StatusInternalServerError)
 		return
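A note on the new delete path: the handler now removes the single key "ownerID/fileID", while the upload handler writes chunk objects under "ownerID/fileID/chunk_N", and the wrapper's Delete maps to a single-key RemoveObject. If a prefix-wide cleanup is ever wanted, it could look roughly like the sketch below, built on the same minio-go v7 client as the PR's storage.S3; DeletePrefix is illustrative only and not part of this PR.

// Sketch only, not part of this PR: delete every object whose key starts with
// a "<ownerID>/<fileID>/" prefix, since RemoveObject targets exactly one key.
package storage

import (
	"context"

	"github.com/minio/minio-go/v7"
)

func (storage *S3) DeletePrefix(ctx context.Context, prefix string) error {
	objectCh := storage.Client.ListObjects(ctx, storage.Bucket, minio.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: true,
	})
	for object := range objectCh {
		if object.Err != nil {
			return object.Err
		}
		// Remove each listed key individually.
		if err := storage.Client.RemoveObject(ctx, storage.Bucket, object.Key, minio.RemoveObjectOptions{}); err != nil {
			return err
		}
	}
	return nil
}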

@@ -1,14 +1,12 @@
 package downloadHandler

 import (
+	"context"
 	"fmt"
 	"github.com/fossyy/filekeeper/app"
 	"github.com/fossyy/filekeeper/session"
 	"github.com/fossyy/filekeeper/types/models"
-	"io"
 	"net/http"
-	"os"
-	"path/filepath"
 	"strconv"
 	"strings"
 )

@@ -33,16 +31,16 @@ func GET(w http.ResponseWriter, r *http.Request) {
 		}
 	}

-	uploadDir := "uploads"
-	currentDir, _ := os.Getwd()
-	basePath := filepath.Join(currentDir, uploadDir)
-	saveFolder := filepath.Join(basePath, file.OwnerID.String(), file.ID.String())
-
-	if filepath.Dir(saveFolder) != filepath.Join(basePath, file.OwnerID.String()) {
-		http.Error(w, "Invalid Path", http.StatusInternalServerError)
-		app.Server.Logger.Error("invalid path")
-		return
-	}
+	//uploadDir := "uploads"
+	//currentDir, _ := os.Getwd()
+	//basePath := filepath.Join(currentDir, uploadDir)
+	//saveFolder := filepath.Join(basePath, file.OwnerID.String(), file.ID.String())
+	//
+	//if filepath.Dir(saveFolder) != filepath.Join(basePath, file.OwnerID.String()) {
+	//	http.Error(w, "Invalid Path", http.StatusInternalServerError)
+	//	app.Server.Logger.Error("invalid path")
+	//	return
+	//}

 	rangeHeader := r.Header.Get("Range")
 	if rangeHeader != "" {

@@ -69,7 +67,7 @@ func GET(w http.ResponseWriter, r *http.Request) {
 			w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, file.Size))
 			w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
 			w.WriteHeader(http.StatusPartialContent)
-			sendFileChunk(w, saveFolder, file, start, end)
+			sendFileChunk(w, file, start, end)
 			return
 		}
 	}

@@ -79,11 +77,11 @@ func GET(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Accept-Ranges", "bytes")
 	w.Header().Set("Content-Length", fmt.Sprintf("%d", file.Size))

-	sendFileChunk(w, saveFolder, file, 0, int64(file.Size-1))
+	sendFileChunk(w, file, 0, int64(file.Size-1))
 	return
 }

-func sendFileChunk(w http.ResponseWriter, saveFolder string, file *models.File, start, end int64) {
+func sendFileChunk(w http.ResponseWriter, file *models.File, start, end int64) {
 	chunkSize := int64(2 * 1024 * 1024)
 	startChunk := start / chunkSize

@@ -93,64 +91,39 @@ func sendFileChunk(w http.ResponseWriter, saveFolder string, file *models.File,
 	endOffset := end % chunkSize

 	for i := startChunk; i <= endChunk; i++ {
-		chunkPath := filepath.Join(saveFolder, fmt.Sprintf("chunk_%d", i))
-		chunkFile, err := os.Open(chunkPath)
+		chunkKey := fmt.Sprintf("%s/%s/chunk_%d", file.OwnerID.String(), file.ID.String(), i)
+		chunkData, err := app.Server.Storage.Get(context.TODO(), chunkKey)
 		if err != nil {
-			http.Error(w, fmt.Sprintf("Error opening chunk: %v", err), http.StatusInternalServerError)
+			http.Error(w, fmt.Sprintf("Error retrieving chunk: %v", err), http.StatusInternalServerError)
 			app.Server.Logger.Error(err.Error())
 			return
 		}
-		defer chunkFile.Close()

-		var chunkStart, chunkEnd int64
-		if i == startChunk {
-			chunkStart = startOffset
+		var dataToSend []byte
+		if i == startChunk && i == endChunk {
+			dataToSend = chunkData[startOffset : endOffset+1]
+		} else if i == startChunk {
+			dataToSend = chunkData[startOffset:]
+		} else if i == endChunk {
+			dataToSend = chunkData[:endOffset+1]
 		} else {
-			chunkStart = 0
-		}
-		if i == endChunk {
-			chunkEnd = endOffset
-		} else {
-			chunkEnd = chunkSize - 1
+			dataToSend = chunkData
 		}

-		_, err = chunkFile.Seek(chunkStart, io.SeekStart)
-		if err != nil {
-			http.Error(w, fmt.Sprintf("Error seeking chunk: %v", err), http.StatusInternalServerError)
-			app.Server.Logger.Error(err.Error())
-			return
-		}
-
-		buffer := make([]byte, 2048)
-		toSend := chunkEnd - chunkStart + 1
-
-		for toSend > 0 {
-			n, err := chunkFile.Read(buffer)
-			if err != nil && err != io.EOF {
-				http.Error(w, fmt.Sprintf("Error reading chunk: %v", err), http.StatusInternalServerError)
-				app.Server.Logger.Error(err.Error())
-				return
-			}
-			if n == 0 {
-				break
-			}
-			if int64(n) > toSend {
-				n = int(toSend)
-			}
-
-			_, err = w.Write(buffer[:n])
-			if err != nil {
-				http.Error(w, fmt.Sprintf("Error writing chunk: %v", err), http.StatusInternalServerError)
-				app.Server.Logger.Error(err.Error())
-				return
-			}
-			toSend -= int64(n)
-
-			if i == int64(file.TotalChunk)-1 && toSend == 0 {
-				err := app.Server.Database.IncrementDownloadCount(file.ID.String())
-				if err != nil {
-					http.Error(w, fmt.Sprintf("Error writing chunk: %v", err), http.StatusInternalServerError)
-					app.Server.Logger.Error(err.Error())
-					return
-				}
-			}
-		}
+		_, err = w.Write(dataToSend)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Error writing chunk: %v", err), http.StatusInternalServerError)
+			app.Server.Logger.Error(err.Error())
+			return
+		}
+
+		if i == int64(file.TotalChunk)-1 {
+			err := app.Server.Database.IncrementDownloadCount(file.ID.String())
+			if err != nil {
+				http.Error(w, fmt.Sprintf("Error updating download count: %v", err), http.StatusInternalServerError)
+				app.Server.Logger.Error(err.Error())
+				return
+			}
+		}
 	}
 }
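To follow the range math in sendFileChunk: with the 2 MiB chunk size, a header like "Range: bytes=3145728-5242879" yields startChunk 1, endChunk 2, startOffset 1048576 and endOffset 1048575, so chunk_1 is sliced from offset 1048576 onward and chunk_2 up to and including offset 1048575. A standalone sketch of that arithmetic (the sample values are illustrative, not from the PR):

// Illustrative sketch: the chunk/offset arithmetic used by sendFileChunk.
package main

import "fmt"

func main() {
	const chunkSize = int64(2 * 1024 * 1024) // 2 MiB, same constant as the handler

	start, end := int64(3145728), int64(5242879) // e.g. "Range: bytes=3145728-5242879"

	startChunk := start / chunkSize // first chunk object to fetch
	endChunk := end / chunkSize     // last chunk object to fetch
	startOffset := start % chunkSize
	endOffset := end % chunkSize

	fmt.Println(startChunk, endChunk, startOffset, endOffset) // 1 2 1048576 1048575
}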

@@ -8,7 +8,6 @@ import (
 	"github.com/fossyy/filekeeper/utils"
 	fileView "github.com/fossyy/filekeeper/view/client/file"
 	"net/http"
-	"path/filepath"
 	"strconv"
 )

@@ -23,12 +22,16 @@ func GET(w http.ResponseWriter, r *http.Request) {
 	var filesData []types.FileData

 	for _, file := range files {
-		saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
-		pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
-		chunkFiles, err := filepath.Glob(pattern)
-		missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
+		prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
+		existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
+		if err != nil {
+			app.Server.Logger.Error(err.Error())
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		missingChunk := len(existingChunks) != int(file.TotalChunk)

 		filesData = append(filesData, types.FileData{
 			ID: file.ID.String(),
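The list-then-compare check above (ListObjects on the "ownerID/fileID/chunk_" prefix versus TotalChunk) reappears in the search, PATCH, and PUT handlers below. A small helper could capture it; a sketch assuming the Storage interface added in this PR (hasMissingChunk and its package name are hypothetical):

// Sketch only, not part of this PR: a shared chunk-completeness check.
package handlerutil

import (
	"context"
	"fmt"

	"github.com/fossyy/filekeeper/types"
)

// hasMissingChunk reports whether any chunk object is absent for a file.
func hasMissingChunk(ctx context.Context, store types.Storage, ownerID, fileID string, totalChunk int) (bool, error) {
	prefix := fmt.Sprintf("%s/%s/chunk_", ownerID, fileID)
	existingChunks, err := store.ListObjects(ctx, prefix)
	if err != nil {
		return false, err
	}
	return len(existingChunks) != totalChunk, nil
}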

@@ -7,7 +7,6 @@ import (
 	"github.com/fossyy/filekeeper/utils"
 	fileView "github.com/fossyy/filekeeper/view/client/file"
 	"net/http"
-	"path/filepath"
 	"strconv"
 )

@@ -16,6 +15,7 @@ func GET(w http.ResponseWriter, r *http.Request) {
 	query := r.URL.Query().Get("q")
 	status := r.URL.Query().Get("status")
 	var fileStatus types.FileStatus
+
 	if status == "private" {
 		fileStatus = types.Private
 	} else if status == "public" {

@@ -23,6 +23,7 @@ func GET(w http.ResponseWriter, r *http.Request) {
 	} else {
 		fileStatus = types.All
 	}
+
 	files, err := app.Server.Database.GetFiles(userSession.UserID.String(), query, fileStatus)
 	if err != nil {
 		app.Server.Logger.Error(err.Error())

@@ -33,12 +34,16 @@ func GET(w http.ResponseWriter, r *http.Request) {
 	var filesData []types.FileData

 	for _, file := range files {
-		saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
-		pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
-		chunkFiles, err := filepath.Glob(pattern)
-		missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
+		prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
+		existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
+		if err != nil {
+			app.Server.Logger.Error(err.Error())
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		missingChunk := len(existingChunks) != int(file.TotalChunk)

 		filesData = append(filesData, types.FileData{
 			ID: file.ID.String(),

@@ -62,5 +67,4 @@ func GET(w http.ResponseWriter, r *http.Request) {
 	}

 	w.WriteHeader(http.StatusForbidden)
-	return
 }

@@ -7,7 +7,6 @@ import (
 	"github.com/fossyy/filekeeper/utils"
 	fileView "github.com/fossyy/filekeeper/view/client/file"
 	"net/http"
-	"path/filepath"
 	"strconv"
 )

@@ -38,11 +37,16 @@ func PATCH(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
-	pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
-	chunkFiles, err := filepath.Glob(pattern)
-	missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
+	prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
+
+	existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
+	if err != nil {
+		app.Server.Logger.Error(err.Error())
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	missingChunk := len(existingChunks) != int(file.TotalChunk)

 	fileData := types.FileData{
 		ID: newFile.ID.String(),

@@ -3,13 +3,9 @@ package uploadHandler
 import (
 	"fmt"
 	"github.com/fossyy/filekeeper/app"
-	"github.com/fossyy/filekeeper/types"
 	"io"
 	"net/http"
-	"os"
-	"path/filepath"
 	"strconv"
-	"strings"
 )

 func POST(w http.ResponseWriter, r *http.Request) {

@@ -19,17 +15,6 @@ func POST(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	userSession := r.Context().Value("user").(types.User)
-
-	uploadDir := "uploads"
-	if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
-		if err := os.Mkdir(uploadDir, os.ModePerm); err != nil {
-			app.Server.Logger.Error("error getting upload info: " + err.Error())
-			w.WriteHeader(http.StatusInternalServerError)
-			return
-		}
-	}
-
 	file, err := app.Server.Service.GetFile(fileID)
 	if err != nil {
 		app.Server.Logger.Error("error getting upload info: " + err.Error())

@@ -43,34 +28,6 @@ func POST(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	currentDir, err := os.Getwd()
-	if err != nil {
-		app.Server.Logger.Error("unable to get current directory")
-		w.WriteHeader(http.StatusInternalServerError)
-		return
-	}
-
-	basePath := filepath.Join(currentDir, uploadDir)
-	cleanBasePath := filepath.Clean(basePath)
-
-	saveFolder := filepath.Join(cleanBasePath, userSession.UserID.String(), file.ID.String())
-	cleanSaveFolder := filepath.Clean(saveFolder)
-
-	if !strings.HasPrefix(cleanSaveFolder, cleanBasePath) {
-		app.Server.Logger.Error("invalid path")
-		w.WriteHeader(http.StatusInternalServerError)
-		return
-	}
-
-	if _, err := os.Stat(saveFolder); os.IsNotExist(err) {
-		if err := os.MkdirAll(saveFolder, os.ModePerm); err != nil {
-			app.Server.Logger.Error("error creating save folder: " + err.Error())
-			w.WriteHeader(http.StatusInternalServerError)
-			return
-		}
-	}
-
 	fileByte, _, err := r.FormFile("chunk")
 	if err != nil {
 		app.Server.Logger.Error("error getting upload info: " + err.Error())

@@ -79,15 +36,15 @@ func POST(w http.ResponseWriter, r *http.Request) {
 	}
 	defer fileByte.Close()

-	dst, err := os.OpenFile(filepath.Join(saveFolder, fmt.Sprintf("chunk_%d", index)), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+	buffer, err := io.ReadAll(fileByte)
 	if err != nil {
-		app.Server.Logger.Error("error making upload folder: " + err.Error())
+		app.Server.Logger.Error("error copying byte to file dst: " + err.Error())
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
-	defer dst.Close()

-	if _, err := io.Copy(dst, fileByte); err != nil {
+	err = app.Server.Storage.Add(r.Context(), fmt.Sprintf("%s/%s/chunk_%d", file.OwnerID.String(), file.ID.String(), index), buffer)
+	if err != nil {
 		app.Server.Logger.Error("error copying byte to file dst: " + err.Error())
 		w.WriteHeader(http.StatusInternalServerError)
 		return
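The new upload path buffers the whole chunk with io.ReadAll and hands the byte slice to Storage.Add. Since the wrapper's PutObject call already takes an io.Reader, a streaming variant is also possible; a sketch under that assumption (Put is hypothetical, not part of this PR):

// Sketch only, not part of this PR: a streaming counterpart to Add that passes
// the reader straight through to PutObject instead of buffering the chunk.
package storage

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
)

func (storage *S3) Put(ctx context.Context, key string, data io.Reader, size int64) error {
	_, err := storage.Client.PutObject(ctx, storage.Bucket, key, data, size, minio.PutObjectOptions{
		ContentType: "application/octet-stream",
	})
	return err
}

A handler could then pass the multipart.File from r.FormFile together with the Size field of its *multipart.FileHeader, avoiding the in-memory copy.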

@@ -7,7 +7,6 @@ import (
 	"github.com/fossyy/filekeeper/utils"
 	fileView "github.com/fossyy/filekeeper/view/client/file"
 	"net/http"
-	"path/filepath"
 	"strconv"
 )

@@ -32,11 +31,17 @@ func PUT(w http.ResponseWriter, r *http.Request) {
 		app.Server.Logger.Error(err.Error())
 		return
 	}

-	saveFolder := filepath.Join("uploads", userSession.UserID.String(), file.ID.String())
-	pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
-	chunkFiles, err := filepath.Glob(pattern)
-	missingChunk := err != nil || len(chunkFiles) != int(file.TotalChunk)
+	prefix := fmt.Sprintf("%s/%s/chunk_", file.OwnerID.String(), file.ID.String())
+	existingChunks, err := app.Server.Storage.ListObjects(r.Context(), prefix)
+	if err != nil {
+		app.Server.Logger.Error(err.Error())
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	missingChunk := len(existingChunks) != int(file.TotalChunk)

 	fileData := types.FileData{
 		ID:   file.ID.String(),
 		Name: file.Name,

@@ -1,6 +1,7 @@
 package userHandler

 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"

@@ -15,7 +16,6 @@
 	"github.com/gorilla/websocket"
 	"gorm.io/gorm"
 	"net/http"
-	"path/filepath"
 	"strings"
 )

@@ -196,34 +196,27 @@ func handlerWS(conn *websocket.Conn, userSession types.User) {
 				Name:       newFile.Name,
 				Size:       newFile.Size,
 				Downloaded: newFile.Downloaded,
-				Done:       false,
 			}
 			fileData.Chunk = make(map[string]bool)
-			saveFolder := filepath.Join("uploads", userSession.UserID.String(), newFile.ID.String())
-			pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
-			chunkFiles, err := filepath.Glob(pattern)
+			prefix := fmt.Sprintf("%s/%s/chunk_", userSession.UserID.String(), newFile.ID.String())
+			existingChunks, err := app.Server.Storage.ListObjects(context.TODO(), prefix)
 			if err != nil {
 				app.Server.Logger.Error(err.Error())
-				fileData.Done = false
+				sendErrorResponse(conn, action.Action, "Unknown error")
+				continue
 			} else {
-				for i := 0; i <= int(newFile.TotalChunk); i++ {
+				for i := 0; i < int(newFile.TotalChunk); i++ {
 					fileData.Chunk[fmt.Sprintf("chunk_%d", i)] = false
 				}
-				for _, chunkFile := range chunkFiles {
+				for _, chunkFile := range existingChunks {
 					var chunkIndex int
-					fmt.Sscanf(filepath.Base(chunkFile), "chunk_%d", &chunkIndex)
+					fmt.Sscanf(chunkFile, "chunk_%d", &chunkIndex)
 					fileData.Chunk[fmt.Sprintf("chunk_%d", chunkIndex)] = true
 				}
-				for i := 0; i <= int(newFile.TotalChunk); i++ {
-					if !fileData.Chunk[fmt.Sprintf("chunk_%d", i)] {
-						fileData.Done = false
-						break
-					}
-				}
 			}

 			sendSuccessResponseWithID(conn, action.Action, fileData, uploadNewFile.RequestID)
 			continue

@@ -246,10 +239,8 @@ func handlerWS(conn *websocket.Conn, userSession types.User) {
 				Done:       true,
 			}

-			saveFolder := filepath.Join("uploads", userSession.UserID.String(), fileData.ID.String())
-			pattern := fmt.Sprintf("%s/chunk_*", saveFolder)
-			chunkFiles, err := filepath.Glob(pattern)
+			prefix := fmt.Sprintf("%s/%s/chunk_", userSession.UserID.String(), file.ID.String())
+			existingChunks, err := app.Server.Storage.ListObjects(context.TODO(), prefix)
 			if err != nil {
 				app.Server.Logger.Error(err.Error())
 				fileData.Done = false

@@ -257,9 +248,9 @@ func handlerWS(conn *websocket.Conn, userSession types.User) {
 			for i := 0; i < int(file.TotalChunk); i++ {
 				fileData.Chunk[fmt.Sprintf("chunk_%d", i)] = false
 			}
-			for _, chunkFile := range chunkFiles {
+			for _, chunkFile := range existingChunks {
 				var chunkIndex int
-				fmt.Sscanf(filepath.Base(chunkFile), "chunk_%d", &chunkIndex)
+				fmt.Sscanf(chunkFile, "chunk_%d", &chunkIndex)
 				fileData.Chunk[fmt.Sprintf("chunk_%d", chunkIndex)] = true
 			}

main.go

@@ -2,6 +2,7 @@ package main
 import (
 	"fmt"
+	"github.com/fossyy/filekeeper/storage"
 	"strconv"

 	"github.com/fossyy/filekeeper/app"

@@ -36,7 +37,14 @@ func main() {
 	smtpPort, _ := strconv.Atoi(utils.Getenv("SMTP_PORT"))
 	mailServer := email.NewSmtpServer(utils.Getenv("SMTP_HOST"), smtpPort, utils.Getenv("SMTP_USER"), utils.Getenv("SMTP_PASSWORD"))

-	app.Server = app.NewClientServer(clientAddr, middleware.Handler(client.SetupRoutes()), *logger.Logger(), database, cacheServer, services, mailServer)
+	bucket := utils.Getenv("S3_BUCKET_NAME")
+	region := utils.Getenv("S3_REGION")
+	endpoint := utils.Getenv("S3_ENDPOINT")
+	accessKey := utils.Getenv("S3_ACCESS_KEY")
+	secretKey := utils.Getenv("S3_SECRET_KEY")
+	S3 := storage.NewS3(bucket, region, endpoint, accessKey, secretKey)
+
+	app.Server = app.NewClientServer(clientAddr, middleware.Handler(client.SetupRoutes()), *logger.Logger(), database, cacheServer, S3, services, mailServer)
 	app.Admin = app.NewAdminServer(adminAddr, middleware.Handler(admin.SetupRoutes()), database)

 	go func() {
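main.go now reads five new settings at startup: S3_BUCKET_NAME, S3_REGION, S3_ENDPOINT, S3_ACCESS_KEY and S3_SECRET_KEY. Because storage.NewS3 returns nil when the client cannot be created, it may be worth failing fast when any of them is missing; a minimal sketch (requireEnv is hypothetical, not part of this PR):

// Sketch only: verify the new S3 settings are present before wiring the server.
package main

import (
	"log"
	"os"
)

func requireEnv(keys ...string) map[string]string {
	values := make(map[string]string, len(keys))
	for _, key := range keys {
		value := os.Getenv(key)
		if value == "" {
			log.Fatalf("missing required environment variable %s", key)
		}
		values[key] = value
	}
	return values
}

func main() {
	cfg := requireEnv("S3_BUCKET_NAME", "S3_REGION", "S3_ENDPOINT", "S3_ACCESS_KEY", "S3_SECRET_KEY")
	_ = cfg // the values would then be passed to storage.NewS3 as main.go does
}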

@@ -1,6 +1,8 @@
 package client

 import (
+	"fmt"
+	"github.com/fossyy/filekeeper/app"
 	googleOauthHandler "github.com/fossyy/filekeeper/handler/auth/google"
 	googleOauthCallbackHandler "github.com/fossyy/filekeeper/handler/auth/google/callback"
 	googleOauthSetupHandler "github.com/fossyy/filekeeper/handler/auth/google/setup"

@@ -157,6 +159,25 @@
 		http.ServeFile(w, r, "public/favicon.ico")
 	})

+	handler.HandleFunc("GET /test", func(w http.ResponseWriter, r *http.Request) {
+		objects, err := app.Server.Storage.ListObjects(r.Context(), "test/")
+		fmt.Println(objects)
+		if err != nil {
+			return
+		}
+		if r.URL.Query().Get("new") != "" {
+			app.Server.Storage.Add(r.Context(), "test.txt", []byte(r.URL.Query().Get("new")))
+			w.Write([]byte("succes"))
+			return
+		}
+		get, err := app.Server.Storage.Get(r.Context(), "test.txt")
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		w.Write(get)
+	})
+
 	fileServer := http.FileServer(http.Dir("./public"))
 	handler.Handle("/public/", http.StripPrefix("/public", fileServer))

storage/storage.go (new file)

@@ -0,0 +1,80 @@
+package storage
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+	"io"
+	"path/filepath"
+)
+
+type S3 struct {
+	Client *minio.Client
+	Bucket string
+}
+
+func NewS3(bucket string, region string, endpoint string, accessKey string, secretKey string) *S3 {
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
+		Secure: true,
+		Region: region,
+	})
+	if err != nil {
+		return nil
+	}
+	return &S3{Client: minioClient, Bucket: bucket}
+}
+
+func (storage *S3) Get(ctx context.Context, key string) ([]byte, error) {
+	object, err := storage.Client.GetObject(ctx, storage.Bucket, key, minio.GetObjectOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer object.Close()
+
+	data, err := io.ReadAll(object)
+	if err != nil {
+		return nil, err
+	}
+	return data, nil
+}
+
+func (storage *S3) Add(ctx context.Context, key string, data []byte) error {
+	reader := bytes.NewReader(data)
+	_, err := storage.Client.PutObject(ctx, storage.Bucket, key, reader, int64(reader.Len()), minio.PutObjectOptions{
+		ContentType: "application/octet-stream",
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (storage *S3) Delete(ctx context.Context, key string) error {
+	err := storage.Client.RemoveObject(ctx, storage.Bucket, key, minio.RemoveObjectOptions{})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (storage *S3) ListObjects(ctx context.Context, prefix string) ([]string, error) {
+	var objects []string
+	objectCh := storage.Client.ListObjects(ctx, storage.Bucket, minio.ListObjectsOptions{
+		Prefix: prefix,
+	})
+	for object := range objectCh {
+		if object.Err != nil {
+			return nil, fmt.Errorf("failed to list objects: %w", object.Err)
+		}
+		fileName := filepath.Base(object.Key)
+		objects = append(objects, fileName)
+	}
+	return objects, nil
+}
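For orientation, a round trip through the new wrapper; the bucket, endpoint, credentials and key below are placeholders (the real values come from the S3_* environment variables read in main.go), and error handling is trimmed to keep the sketch short:

// Illustrative usage of storage.S3; all values are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/fossyy/filekeeper/storage"
)

func main() {
	s3 := storage.NewS3("filekeeper", "us-east-1", "s3.example.com", "<access-key>", "<secret-key>")

	ctx := context.Background()
	key := "demo-owner/demo-file/chunk_0"

	if err := s3.Add(ctx, key, []byte("hello")); err != nil {
		panic(err)
	}
	data, err := s3.Get(ctx, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "hello"

	names, _ := s3.ListObjects(ctx, "demo-owner/demo-file/chunk_")
	fmt.Println(names) // [chunk_0] -- base names only, as the handlers expect

	_ = s3.Delete(ctx, key)
}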

@@ -2,9 +2,10 @@ package types
 import (
 	"context"
+	"time"
+
 	"github.com/fossyy/filekeeper/types/models"
 	"github.com/google/uuid"
-	"time"
 )

 type FileStatus string

@@ -92,3 +93,10 @@ type Services interface {
 	GetUserFile(name, ownerID string) (*FileWithDetail, error)
 	GetUserStorageUsage(ownerID string) (uint64, error)
 }
+
+type Storage interface {
+	Get(ctx context.Context, key string) ([]byte, error)
+	Add(ctx context.Context, key string, data []byte) error
+	Delete(ctx context.Context, key string) error
+	ListObjects(ctx context.Context, prefix string) ([]string, error)
+}
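Because the handlers now depend on the Storage interface rather than the filesystem, a fake is easy to substitute in tests; a minimal in-memory sketch (memoryStorage and its package are hypothetical, not part of this PR):

// Sketch only: a map-backed stand-in satisfying types.Storage for handler tests.
package storagetest

import (
	"context"
	"fmt"
	"strings"
	"sync"
)

type memoryStorage struct {
	mu      sync.RWMutex
	objects map[string][]byte
}

func newMemoryStorage() *memoryStorage {
	return &memoryStorage{objects: make(map[string][]byte)}
}

func (m *memoryStorage) Get(ctx context.Context, key string) ([]byte, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	data, ok := m.objects[key]
	if !ok {
		return nil, fmt.Errorf("key %s not found", key)
	}
	return data, nil
}

func (m *memoryStorage) Add(ctx context.Context, key string, data []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.objects[key] = data
	return nil
}

func (m *memoryStorage) Delete(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.objects, key)
	return nil
}

func (m *memoryStorage) ListObjects(ctx context.Context, prefix string) ([]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	var names []string
	for key := range m.objects {
		if strings.HasPrefix(key, prefix) {
			// Mirror the S3 wrapper, which returns base names such as "chunk_0".
			names = append(names, key[strings.LastIndex(key, "/")+1:])
		}
	}
	return names, nil
}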