Implement file caching for faster retrieval and reduced database load
178  cache/cache.go  (vendored, new file)
@@ -0,0 +1,178 @@
package cache

import (
	"fmt"
	"github.com/fossyy/filekeeper/db"
	"github.com/fossyy/filekeeper/logger"
	"github.com/fossyy/filekeeper/utils"
	"github.com/google/uuid"
	"sync"
	"time"
)

type UserWithExpired struct {
	UserID   uuid.UUID
	Username string
	Email    string
	Password string
	AccessAt time.Time
	mu       sync.Mutex
}

type FileWithExpired struct {
	ID            uuid.UUID
	OwnerID       uuid.UUID
	Name          string
	Size          int64
	Downloaded    int64
	UploadedByte  int64
	UploadedChunk int64
	Done          bool
	AccessAt      time.Time
	mu            sync.Mutex
}

var log *logger.AggregatedLogger
var userCache map[string]*UserWithExpired
var fileCache map[string]*FileWithExpired

func init() {
	log = logger.Logger()

	userCache = make(map[string]*UserWithExpired)
	fileCache = make(map[string]*FileWithExpired)
	ticker := time.NewTicker(time.Minute)

	go func() {
		for {
			<-ticker.C
			currentTime := time.Now()
			cacheClean := 0
			cleanID := utils.GenerateRandomString(10)
			log.Info(fmt.Sprintf("Cache cleanup [user] [%s] initiated at %02d:%02d:%02d", cleanID, currentTime.Hour(), currentTime.Minute(), currentTime.Second()))

			for _, user := range userCache {
				user.mu.Lock()
				if currentTime.Sub(user.AccessAt) > time.Hour*8 {
					delete(userCache, user.Email)
					cacheClean++
				}
				user.mu.Unlock()
			}

			log.Info(fmt.Sprintf("Cache cleanup [user] [%s] completed: %d entries removed. Finished at %s", cleanID, cacheClean, time.Since(currentTime)))
		}
	}()

	go func() {
		for {
			<-ticker.C
			currentTime := time.Now()
			cacheClean := 0
			cleanID := utils.GenerateRandomString(10)
			log.Info(fmt.Sprintf("Cache cleanup [files] [%s] initiated at %02d:%02d:%02d", cleanID, currentTime.Hour(), currentTime.Minute(), currentTime.Second()))

			for _, file := range fileCache {
				file.mu.Lock()
				if currentTime.Sub(file.AccessAt) > time.Minute*10 {
					db.DB.UpdateUploadedByte(file.UploadedByte, file.ID.String())
					db.DB.UpdateUploadedChunk(file.UploadedChunk, file.ID.String())
					delete(fileCache, file.ID.String())
					cacheClean++
				}
				file.mu.Unlock()
			}

			log.Info(fmt.Sprintf("Cache cleanup [files] [%s] completed: %d entries removed. Finished at %s", cleanID, cacheClean, time.Since(currentTime)))
		}
	}()
}

func GetUser(email string) (*UserWithExpired, error) {
	if user, ok := userCache[email]; ok {
		return user, nil
	}

	userData, err := db.DB.GetUser(email)
	if err != nil {
		return nil, err
	}

	userCache[email] = &UserWithExpired{
		UserID:   userData.UserID,
		Username: userData.Username,
		Email:    userData.Email,
		Password: userData.Password,
		AccessAt: time.Now(),
	}

	return userCache[email], nil
}

func DeleteUser(email string) {
	userCache[email].mu.Lock()
	defer userCache[email].mu.Unlock()

	delete(userCache, email)
}

func GetFile(id string) (*FileWithExpired, error) {
	if file, ok := fileCache[id]; ok {
		file.AccessAt = time.Now()
		return file, nil
	}

	uploadData, err := db.DB.GetFile(id)
	if err != nil {
		return nil, err
	}
	fmt.Println("nih : ", uploadData)
	fileCache[id] = &FileWithExpired{
		ID:            uploadData.ID,
		OwnerID:       uploadData.OwnerID,
		Name:          uploadData.Name,
		Size:          uploadData.Size,
		Downloaded:    uploadData.Downloaded,
		UploadedByte:  uploadData.UploadedByte,
		UploadedChunk: uploadData.UploadedChunk,
		Done:          uploadData.Done,
		AccessAt:      time.Now(),
	}

	return fileCache[id], nil
}

func (file *FileWithExpired) UpdateProgress(index int64, size int64) {
	file.UploadedChunk = index
	file.UploadedByte = size
	file.AccessAt = time.Now()
}

func GetUserFile(name, ownerID string) (*FileWithExpired, error) {
	fileData, err := db.DB.GetUserFile(name, ownerID)
	if err != nil {
		return nil, err
	}

	file, err := GetFile(fileData.ID.String())
	if err != nil {
		return nil, err
	}

	return file, nil
}

func (file *FileWithExpired) FinalizeFileUpload() {
	db.DB.UpdateUploadedByte(file.UploadedByte, file.ID.String())
	db.DB.UpdateUploadedChunk(file.UploadedChunk, file.ID.String())
	db.DB.FinalizeFileUpload(file.ID.String())
	delete(fileCache, file.ID.String())
	return
}

//func DeleteUploadInfo(id string) {
//	filesUploadedCache[id].mu.Lock()
//	defer filesUploadedCache[id].mu.Unlock()
//
//	delete(filesUploadedCache, id)
//}
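The cache is a read-through design: lookups check the in-memory map first and fall back to the database, while two background goroutines evict idle entries (users after eight hours, files after ten minutes) and, for files, flush upload progress back to the database on eviction. Two caveats apply to the code as written. First, both cleanup loops receive from the same ticker.C, and Go delivers each tick to exactly one receiver, so on any given minute only one of the two sweeps actually runs. Second, userCache and fileCache are plain maps that handlers and the sweeps read and write concurrently; the per-entry mu guards an entry, not the map, and Go maps are not safe for concurrent use. A minimal sketch of a lock-guarded read-through lookup, with hypothetical names (entry, store, fetch) standing in for the package's types and database calls:

package main

import (
	"sync"
	"time"
)

// entry stands in for the commit's UserWithExpired.
type entry struct {
	Email    string
	AccessAt time.Time
}

var (
	mu    sync.RWMutex              // guards the map itself
	store = make(map[string]*entry) // stands in for userCache
)

// fetch is a placeholder for the database fallback (db.DB.GetUser).
func fetch(email string) *entry {
	return &entry{Email: email, AccessAt: time.Now()}
}

// get is a read-through lookup: hit the map under a read lock,
// fall back to the database and insert under a write lock.
func get(email string) *entry {
	mu.RLock()
	e, ok := store[email]
	mu.RUnlock()
	if ok {
		return e
	}
	e = fetch(email)
	mu.Lock()
	store[email] = e
	mu.Unlock()
	return e
}

func main() {
	_ = get("user@example.com")
}

A sync.Map or a single RWMutex around every map access would serve equally well; the essential point is that the map itself needs a guard distinct from the per-entry lock.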
79  cache/user.go  (vendored, deleted)
@@ -1,79 +0,0 @@
package cache

import (
	"fmt"
	"github.com/fossyy/filekeeper/db"
	"github.com/fossyy/filekeeper/logger"
	"github.com/fossyy/filekeeper/utils"
	"github.com/google/uuid"
	"sync"
	"time"
)

type UserWithExpired struct {
	UserID   uuid.UUID
	Username string
	Email    string
	Password string
	AccessAt time.Time
	mu       sync.Mutex
}

var log *logger.AggregatedLogger
var userCache map[string]*UserWithExpired

func init() {
	log = logger.Logger()

	userCache = make(map[string]*UserWithExpired)
	ticker := time.NewTicker(time.Minute)

	go func() {
		for {
			<-ticker.C
			currentTime := time.Now()
			cacheClean := 0
			cleanID := utils.GenerateRandomString(10)
			log.Info(fmt.Sprintf("Cache cleanup [user] [%s] initiated at %02d:%02d:%02d", cleanID, currentTime.Hour(), currentTime.Minute(), currentTime.Second()))

			for _, user := range userCache {
				user.mu.Lock()
				if currentTime.Sub(user.AccessAt) > time.Hour*8 {
					delete(userCache, user.Email)
					cacheClean++
				}
				user.mu.Unlock()
			}

			log.Info(fmt.Sprintf("Cache cleanup [user] [%s] completed: %d entries removed. Finished at %s", cleanID, cacheClean, time.Since(currentTime)))
		}
	}()
}

func GetUser(email string) (*UserWithExpired, error) {
	if user, ok := userCache[email]; ok {
		return user, nil
	}

	userData, err := db.DB.GetUser(email)
	if err != nil {
		return nil, err
	}

	userCache[email] = &UserWithExpired{
		UserID:   userData.UserID,
		Username: userData.Username,
		Email:    userData.Email,
		Password: userData.Password,
		AccessAt: time.Now(),
	}

	return userCache[email], nil
}

func DeleteUser(email string) {
	userCache[email].mu.Lock()
	defer userCache[email].mu.Unlock()

	delete(userCache, email)
}
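Note that the deleted cache/user.go is carried over into the new cache/cache.go essentially verbatim: the UserWithExpired struct, GetUser, DeleteUser, and the eight-hour sweep are unchanged, so the net effect of the commit is to add file caching alongside the existing user cache in a single file.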
@@ -41,9 +41,8 @@ type Database interface {
 	GetUserFile(name string, ownerID string) (*models.File, error)
 	GetFiles(ownerID string) ([]*models.File, error)
 
-	CreateUploadInfo(info models.FilesUploaded) error
-	GetUploadInfo(uploadID string) (*models.FilesUploaded, error)
-	UpdateUpdateIndex(index int, fileID string)
+	UpdateUploadedByte(index int64, fileID string)
+	UpdateUploadedChunk(index int64, fileID string)
 	FinalizeFileUpload(fileID string)
 }
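The three files_uploadeds-backed methods are replaced by two progress setters on the files table. Like FinalizeFileUpload, the new methods return no error, so a write that fails during cache eviction is dropped silently. A hedged sketch of what error propagation could look like; ProgressStore, flush, and noopStore are hypothetical names, not part of the repository:

package main

import "fmt"

// ProgressStore is a hypothetical error-returning variant of the two
// new interface methods.
type ProgressStore interface {
	UpdateUploadedByte(n int64, fileID string) error
	UpdateUploadedChunk(index int64, fileID string) error
}

// flush writes both progress values and surfaces the first failure,
// so an eviction path could at least log it.
func flush(s ProgressStore, fileID string, bytes, chunk int64) error {
	if err := s.UpdateUploadedByte(bytes, fileID); err != nil {
		return fmt.Errorf("flush uploaded bytes: %w", err)
	}
	if err := s.UpdateUploadedChunk(chunk, fileID); err != nil {
		return fmt.Errorf("flush uploaded chunk: %w", err)
	}
	return nil
}

type noopStore struct{}

func (noopStore) UpdateUploadedByte(int64, string) error  { return nil }
func (noopStore) UpdateUploadedChunk(int64, string) error { return nil }

func main() {
	_ = flush(noopStore{}, "file-id", 1024, 3)
}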
@@ -185,6 +184,7 @@ func (db *mySQLdb) CreateFile(file *models.File) error {
 
 func (db *mySQLdb) GetFile(fileID string) (*models.File, error) {
 	var file models.File
+	fmt.Println(fileID)
 	err := db.DB.Table("files").Where("id = ?", fileID).First(&file).Error
 	if err != nil {
 		return nil, err
@@ -210,32 +210,19 @@ func (db *mySQLdb) GetFiles(ownerID string) ([]*models.File, error) {
 	return files, err
 }
 
-// CreateUploadInfo It's not optimal, but it's okay for now. Consider implementing caching instead of pushing all updates to the database for better performance in the future.
-func (db *mySQLdb) CreateUploadInfo(info models.FilesUploaded) error {
-	err := db.DB.Create(info).Error
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (db *mySQLdb) GetUploadInfo(fileID string) (*models.FilesUploaded, error) {
-	var info models.FilesUploaded
-	err := db.DB.Table("files_uploadeds").Where("file_id = ?", fileID).First(&info).Error
-	if err != nil {
-		return nil, err
-	}
-	return &info, nil
-}
-
-func (db *mySQLdb) UpdateUpdateIndex(index int, fileID string) {
-	db.DB.Table("files_uploadeds").Where("file_id = ?", fileID).Updates(map[string]interface{}{
-		"Uploaded": index,
-	})
-}
+func (db *mySQLdb) UpdateUploadedByte(byte int64, fileID string) {
+	db.DB.Table("files").Where("id = ?", fileID).Updates(map[string]interface{}{
+		"Uploaded_byte": byte,
+	})
+}
+
+func (db *mySQLdb) UpdateUploadedChunk(index int64, fileID string) {
+	db.DB.Table("files").Where("id = ?", fileID).Updates(map[string]interface{}{
+		"Uploaded_chunk": index,
+	})
+}
 
 func (db *mySQLdb) FinalizeFileUpload(fileID string) {
-	db.DB.Table("files_uploadeds").Where("file_id = ?", fileID).Updates(map[string]interface{}{
+	db.DB.Table("files").Where("id = ?", fileID).Updates(map[string]interface{}{
 		"Done": true,
 	})
 }
@@ -313,32 +300,19 @@ func (db *postgresDB) GetFiles(ownerID string) ([]*models.File, error) {
 	return files, err
 }
 
-// CreateUploadInfo It's not optimal, but it's okay for now. Consider implementing caching instead of pushing all updates to the database for better performance in the future.
-func (db *postgresDB) CreateUploadInfo(info models.FilesUploaded) error {
-	err := db.DB.Create(info).Error
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (db *postgresDB) GetUploadInfo(fileID string) (*models.FilesUploaded, error) {
-	var info models.FilesUploaded
-	err := db.DB.Table("files_uploadeds").Where("file_id = $1", fileID).First(&info).Error
-	if err != nil {
-		return nil, err
-	}
-	return &info, nil
-}
-
-func (db *postgresDB) UpdateUpdateIndex(index int, fileID string) {
-	db.DB.Table("files_uploadeds").Where("file_id = $1", fileID).Updates(map[string]interface{}{
-		"Uploaded": index,
-	})
-}
+func (db *postgresDB) UpdateUploadedByte(byte int64, fileID string) {
+	db.DB.Table("files").Where("id = $1", fileID).Updates(map[string]interface{}{
+		"Uploaded_byte": byte,
+	})
+}
+
+func (db *postgresDB) UpdateUploadedChunk(index int64, fileID string) {
+	db.DB.Table("files").Where("id = $1", fileID).Updates(map[string]interface{}{
+		"Uploaded_chunk": index,
+	})
+}
 
 func (db *postgresDB) FinalizeFileUpload(fileID string) {
-	db.DB.Table("files_uploadeds").Where("file_id = $1", fileID).Updates(map[string]interface{}{
+	db.DB.Table("files").Where("id = $1", fileID).Updates(map[string]interface{}{
 		"Done": true,
 	})
 }
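The MySQL and PostgreSQL implementations change in lockstep, differing only in placeholder style (? versus $1). With the files_uploadeds helpers gone, the upload hot path no longer writes progress to the database on every chunk; progress lives in the cache and reaches the database only on eviction or finalization, which is the load reduction the commit message refers to.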
@@ -3,6 +3,8 @@ package initialisation
 import (
 	"encoding/json"
 	"errors"
+	"fmt"
+	"github.com/fossyy/filekeeper/cache"
 	"io"
 	"net/http"
 	"os"
@@ -37,7 +39,7 @@ func POST(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	fileData, err := db.DB.GetUserFile(fileInfo.Name, userSession.UserID.String())
+	fileData, err := cache.GetUserFile(fileInfo.Name, userSession.UserID.String())
 	if err != nil {
 		if errors.Is(err, gorm.ErrRecordNotFound) {
 			upload, err := handleNewUpload(userSession, fileInfo)
@@ -52,27 +54,22 @@
 		return
 	}
 
-	info, err := db.DB.GetUploadInfo(fileData.ID.String())
-	if err != nil {
-		log.Error(err.Error())
-		return
-	}
-
-	if info.Done {
+	if fileData.Done {
 		respondJSON(w, map[string]bool{"Done": true})
 		return
 	}
-	respondJSON(w, info)
+	fmt.Println("nih2 : ", fileData)
+	respondJSON(w, fileData)
 }
 
-func handleNewUpload(user types.User, file types.FileInfo) (models.FilesUploaded, error) {
+func handleNewUpload(user types.User, file types.FileInfo) (models.File, error) {
 	uploadDir := "uploads"
 	if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
 		log.Error(err.Error())
 		err := os.Mkdir(uploadDir, os.ModePerm)
 		if err != nil {
 			log.Error(err.Error())
-			return models.FilesUploaded{}, err
+			return models.File{}, err
 		}
 	}
@@ -83,45 +80,33 @@ func handleNewUpload(user types.User, file types.FileInfo) (models.FilesUploaded
 	basePath := filepath.Join(currentDir, uploadDir)
 	saveFolder := filepath.Join(basePath, ownerID.String(), fileID.String())
 	if filepath.Dir(saveFolder) != filepath.Join(basePath, ownerID.String()) {
-		return models.FilesUploaded{}, errors.New("invalid path")
+		return models.File{}, errors.New("invalid path")
 	}
 
 	err := os.MkdirAll(saveFolder, os.ModePerm)
 	if err != nil {
 		log.Error(err.Error())
-		return models.FilesUploaded{}, err
+		return models.File{}, err
 	}
 
 	newFile := models.File{
 		ID:         fileID,
 		OwnerID:    ownerID,
 		Name:       file.Name,
 		Size:       file.Size,
 		Downloaded: 0,
+		UploadedByte:  0,
+		UploadedChunk: -1,
+		Done:          false,
 	}
 
 	err = db.DB.CreateFile(&newFile)
 	if err != nil {
 		log.Error(err.Error())
-		return models.FilesUploaded{}, err
+		return models.File{}, err
 	}
 
-	filesUploaded := models.FilesUploaded{
-		UploadID: uuid.New(),
-		FileID:   fileID,
-		OwnerID:  ownerID,
-		Name:     file.Name,
-		Size:     file.Size,
-		Uploaded: -1,
-		Done:     false,
-	}
-
-	err = db.DB.CreateUploadInfo(filesUploaded)
-	if err != nil {
-		log.Error(err.Error())
-		return models.FilesUploaded{}, err
-	}
-
-	return filesUploaded, nil
+	return newFile, nil
 }
 
 func respondJSON(w http.ResponseWriter, data interface{}) {
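The handler now responds with the cached FileWithExpired itself. Since that struct declares no json tags, encoding/json emits the exported Go field names and skips the unexported mu, which is exactly the shape the frontend reads (responseData.Done, responseData.UploadedChunk, responseData.ID). A small self-contained illustration with a trimmed stand-in struct:

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// fileWithExpired is a trimmed stand-in for cache.FileWithExpired;
// it deliberately has no json tags.
type fileWithExpired struct {
	ID            string
	Name          string
	UploadedChunk int64
	Done          bool
	mu            sync.Mutex // unexported: skipped by encoding/json
}

func main() {
	b, _ := json.Marshal(&fileWithExpired{ID: "123", Name: "a.bin", UploadedChunk: 4})
	fmt.Println(string(b))
	// prints {"ID":"123","Name":"a.bin","UploadedChunk":4,"Done":false}
}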
@@ -1,22 +1,18 @@
 package uploadHandler
 
 import (
-	"errors"
-	"github.com/fossyy/filekeeper/db"
+	"github.com/fossyy/filekeeper/cache"
+	"github.com/fossyy/filekeeper/logger"
 	"github.com/fossyy/filekeeper/types"
+	filesView "github.com/fossyy/filekeeper/view/upload"
 	"io"
 	"net/http"
 	"os"
 	"path/filepath"
 	"strconv"
-	"sync"
-
-	"github.com/fossyy/filekeeper/logger"
-	filesView "github.com/fossyy/filekeeper/view/upload"
 )
 
 var log *logger.AggregatedLogger
-var mu sync.Mutex
 
 func init() {
 	log = logger.Logger()
@@ -25,7 +21,7 @@ func init() {
 
 func GET(w http.ResponseWriter, r *http.Request) {
 	component := filesView.Main("upload page")
 	if err := component.Render(r.Context(), w); err != nil {
-		handleError(w, err, http.StatusInternalServerError)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 }
@@ -33,32 +29,31 @@
 func POST(w http.ResponseWriter, r *http.Request) {
 	fileID := r.PathValue("id")
 	if err := r.ParseMultipartForm(32 << 20); err != nil {
-		handleError(w, err, http.StatusInternalServerError)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
 	userSession := r.Context().Value("user").(types.User)
 
-	if r.FormValue("done") == "true" {
-		db.DB.FinalizeFileUpload(fileID)
-		return
-	}
-
 	uploadDir := "uploads"
-	if err := createUploadDirectory(uploadDir); err != nil {
-		handleError(w, err, http.StatusInternalServerError)
-		return
+	if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
+		if err := os.Mkdir(uploadDir, os.ModePerm); err != nil {
+			log.Error("error getting upload info: " + err.Error())
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
 	}
 
-	file, err := db.DB.GetUploadInfo(fileID)
+	file, err := cache.GetFile(fileID)
 	if err != nil {
 		log.Error("error getting upload info: " + err.Error())
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
 	currentDir, _ := os.Getwd()
 	basePath := filepath.Join(currentDir, uploadDir)
-	saveFolder := filepath.Join(basePath, userSession.UserID.String(), file.FileID.String())
+	saveFolder := filepath.Join(basePath, userSession.UserID.String(), file.ID.String())
 
 	if filepath.Dir(saveFolder) != filepath.Join(basePath, userSession.UserID.String()) {
 		log.Error("invalid path")
@@ -66,49 +61,39 @@
 		return
 	}
 
-	fileByte, _, err := r.FormFile("chunk")
+	fileByte, fileHeader, err := r.FormFile("chunk")
 	if err != nil {
-		handleError(w, err, http.StatusInternalServerError)
+		log.Error("error getting upload info: " + err.Error())
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 	defer fileByte.Close()
 
-	dst, err := os.OpenFile(filepath.Join(saveFolder, file.Name), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
-	if err != nil {
-		handleError(w, err, http.StatusInternalServerError)
-		return
-	}
-	defer dst.Close()
-	if _, err := io.Copy(dst, fileByte); err != nil {
-		handleError(w, err, http.StatusInternalServerError)
-		return
-	}
 	rawIndex := r.FormValue("index")
 	index, err := strconv.Atoi(rawIndex)
 	if err != nil {
 		return
 	}
-	db.DB.UpdateUpdateIndex(index, fileID)
-}
 
-func createUploadDirectory(uploadDir string) error {
-	if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
-		if err := os.Mkdir(uploadDir, os.ModePerm); err != nil {
-			return err
-		}
-	}
-	return nil
-}
+	file.UpdateProgress(int64(index), file.UploadedByte+int64(fileHeader.Size))
 
-func handleCookieError(w http.ResponseWriter, r *http.Request, err error) {
-	if errors.Is(err, http.ErrNoCookie) {
-		http.Redirect(w, r, "/signin", http.StatusSeeOther)
+	dst, err := os.OpenFile(filepath.Join(saveFolder, file.Name), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+	if err != nil {
+		log.Error("error making upload folder: " + err.Error())
+		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
-	handleError(w, err, http.StatusInternalServerError)
-}
 
-func handleError(w http.ResponseWriter, err error, status int) {
-	http.Error(w, err.Error(), status)
-	log.Error(err.Error())
+	defer dst.Close()
+	if _, err := io.Copy(dst, fileByte); err != nil {
+		log.Error("error copying byte to file dst: " + err.Error())
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	if file.UploadedByte >= file.Size {
+		file.FinalizeFileUpload()
+		return
+	}
+	return
 }
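Finalization now happens server-side: each chunk advances UploadedByte by the size reported in the multipart header, and once the running total reaches the file size the handler flushes progress and finalizes. Note that the progress update runs before the chunk is written, so a failed OpenFile or Copy leaves the cache counting bytes that never reached disk. A sketch of the alternative ordering, counting bytes actually written; writeChunk is a hypothetical helper, not the handler's code:

package main

import (
	"io"
	"os"
	"strings"
)

// writeChunk appends a chunk to dstPath and reports the bytes actually
// written, so a caller can advance upload progress only after a
// successful copy.
func writeChunk(dstPath string, chunk io.Reader) (int64, error) {
	dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
	if err != nil {
		return 0, err
	}
	defer dst.Close()
	return io.Copy(dst, chunk)
}

func main() {
	n, err := writeChunk(os.TempDir()+"/chunk-demo.bin", strings.NewReader("data"))
	if err == nil {
		_ = n // advance progress by n here, after the write succeeded
	}
}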
@@ -32,7 +32,7 @@ async function handleFile(file){
     if (responseData.Done === false) {
         addNewUploadElement(file)
         const fileChunks = await splitFile(file, chunkSize);
-        await uploadChunks(file.name,file.size, fileChunks, responseData.Uploaded, responseData.FileID);
+        await uploadChunks(file.name,file.size, fileChunks, responseData.UploadedChunk, responseData.ID);
     } else {
         alert("file already uploaded")
     }
@@ -125,10 +125,10 @@ async function splitFile(file, chunkSize) {
 
 async function uploadChunks(name, size, chunks, uploadedChunk= -1, FileID) {
     let byteUploaded = 0
-    var progress1 = document.getElementById(`progress-${name}-1`);
-    var progress2 = document.getElementById(`progress-${name}-2`);
-    var progress3 = document.getElementById(`progress-${name}-3`);
-    var progress4 = document.getElementById(`progress-${name}-4`);
+    let progress1 = document.getElementById(`progress-${name}-1`);
+    let progress2 = document.getElementById(`progress-${name}-2`);
+    let progress3 = document.getElementById(`progress-${name}-3`);
+    let progress4 = document.getElementById(`progress-${name}-4`);
     for (let index = 0; index < chunks.length; index++) {
         const percentComplete = Math.round((index + 1) / chunks.length * 100);
         const chunk = chunks[index];
@@ -152,6 +152,7 @@ async function uploadChunks(name, size, chunks, uploadedChunk= -1, FileID) {
             const totalTime = (endTime - startTime) / 1000;
             const uploadSpeed = chunk.size / totalTime / 1024 / 1024;
             byteUploaded += chunk.size
+            console.log(byteUploaded)
             progress3.innerText = `${uploadSpeed.toFixed(2)} MB/s`;
             progress4.innerText = `Uploading ${percentComplete}% - ${convertFileSize(byteUploaded)} of ${ convertFileSize(size)}`;
         } else {
@@ -160,14 +161,4 @@ async function uploadChunks(name, size, chunks, uploadedChunk= -1, FileID) {
             byteUploaded += chunk.size
         }
     }
-
-    const formData = new FormData();
-    formData.append('name', name);
-    formData.append('done', true);
-    return fetch(`/upload/${FileID}`, {
-        method: 'POST',
-        body: formData
-    });
 }
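With the server finalizing uploads on its own once the byte total reaches the file size, the client's trailing "done" POST is no longer needed, which is why it is removed here. The added console.log(byteUploaded), like the fmt.Println calls on the server side, reads as leftover debug output.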
@@ -1,3 +1,2 @@
 CREATE TABLE IF NOT EXISTS users (user_id VARCHAR(255) PRIMARY KEY NOT NULL,username VARCHAR(255) UNIQUE NOT NULL,email VARCHAR(255) UNIQUE NOT NULL,password TEXT NOT NULL);
-CREATE TABLE IF NOT EXISTS files (id VARCHAR(255) PRIMARY KEY NOT NULL,owner_id VARCHAR(255) NOT NULL,name TEXT NOT NULL,size BIGINT NOT NULL,downloaded BIGINT NOT NULL,FOREIGN KEY (owner_id) REFERENCES users(user_id));
+CREATE TABLE IF NOT EXISTS files (id VARCHAR(255) PRIMARY KEY NOT NULL,owner_id VARCHAR(255) NOT NULL,name TEXT NOT NULL,size BIGINT NOT NULL,downloaded BIGINT NOT NULL,uploaded_byte BIGINT NOT NULL DEFAULT 0, uploaded_chunk BIGINT NOT NULL DEFAULT -1,done BOOLEAN NOT NULL DEFAULT FALSE,FOREIGN KEY (owner_id) REFERENCES users(user_id));
-CREATE TABLE IF NOT EXISTS files_uploadeds (upload_id VARCHAR(255) PRIMARY KEY NOT NULL,file_id VARCHAR(255) NOT NULL,owner_id VARCHAR(255) NOT NULL,name TEXT NOT NULL,size INT NOT NULL,uploaded INT NOT NULL DEFAULT 0,done BOOLEAN NOT NULL DEFAULT FALSE,FOREIGN KEY (file_id) REFERENCES files(id),FOREIGN KEY (owner_id) REFERENCES users(user_id));
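One deployment note: because the schema file only runs CREATE TABLE IF NOT EXISTS, an existing files table will not pick up the three new columns from this statement alone; a pre-existing database would need an explicit ALTER TABLE, and the now-unused files_uploadeds table is simply left behind.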
@@ -10,19 +10,12 @@ type User struct {
 }
 
 type File struct {
 	ID      uuid.UUID `gorm:"primaryKey;not null;unique"`
 	OwnerID uuid.UUID `gorm:"not null"`
 	Name    string    `gorm:"not null"`
-	Size       int `gorm:"not null"`
-	Downloaded int `gorm:"not null;default=0"`
-}
-
-type FilesUploaded struct {
-	UploadID uuid.UUID `gorm:"primaryKey;not null;unique"`
-	FileID   uuid.UUID `gorm:"not null"`
-	OwnerID  uuid.UUID `gorm:"not null"`
-	Name     string    `gorm:"not null"`
-	Size     int       `gorm:"not null"`
-	Uploaded int       `gorm:"not null;default=0"`
-	Done     bool      `gorm:"not null;default=false"`
+	Size          int64 `gorm:"not null"`
+	Downloaded    int64 `gorm:"not null;default=0"`
+	UploadedByte  int64 `gorm:"not null;default=0"`
+	UploadedChunk int64 `gorm:"not null;default=0"`
+	Done          bool  `gorm:"not null;default=false"`
 }
@@ -18,20 +18,13 @@ type User struct {
 
 type FileInfo struct {
 	Name  string `json:"name"`
-	Size  int    `json:"size"`
-	Chunk int    `json:"chunk"`
-}
-
-type FileInfoUploaded struct {
-	Name          string `json:"name"`
-	Size          int    `json:"size"`
-	Chunk         int    `json:"chunk"`
-	UploadedChunk int    `json:"uploaded_chunk"`
+	Size  int64  `json:"size"`
+	Chunk int64  `json:"chunk"`
 }
 
 type FileData struct {
 	ID         string
 	Name       string
 	Size       string
-	Downloaded int
+	Downloaded int64
 }
@@ -97,7 +97,7 @@ func ValidatePassword(password string) bool {
 	return hasSymbol && hasNumber >= 3 && hasUppercase
 }
 
-func ConvertFileSize(byte int) string {
+func ConvertFileSize(byte int64) string {
 	if byte < 1024 {
 		return fmt.Sprintf("%d B", byte)
 	} else if byte < 1024*1024 {
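Widening ConvertFileSize to int64 matches the new File.Size type and avoids truncation where Go's int is only 32 bits. A one-line check of why the wider type matters:

package main

import (
	"fmt"
	"math"
)

func main() {
	// A 4 GiB file size does not fit in a 32-bit int, so the old
	// ConvertFileSize(byte int) signature could not represent it
	// on 32-bit platforms.
	const fourGiB int64 = 4 << 30
	fmt.Println(fourGiB > math.MaxInt32) // true
}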