Implement file caching for faster retrieval and reduced database load

2024-05-06 21:27:26 +07:00
parent df4f7cc9c8
commit 3e3e95bef4
10 changed files with 271 additions and 252 deletions
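
In outline: this commit replaces the files_uploadeds bookkeeping table with in-process caches. cache/cache.go adds a read-through user cache (idle entries evicted after 8 hours) and a file cache that accumulates upload progress in memory, writing uploaded_byte and uploaded_chunk back to the files table only when an entry has been idle for 10 minutes or the upload is finalized. The files table absorbs the upload-progress columns, and the upload handlers switch from direct database calls to the cache.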

cache/cache.go (new file)
@@ -0,0 +1,178 @@
package cache

import (
    "fmt"
    "github.com/fossyy/filekeeper/db"
    "github.com/fossyy/filekeeper/logger"
    "github.com/fossyy/filekeeper/utils"
    "github.com/google/uuid"
    "sync"
    "time"
)

type UserWithExpired struct {
    UserID   uuid.UUID
    Username string
    Email    string
    Password string
    AccessAt time.Time
    mu       sync.Mutex
}

type FileWithExpired struct {
    ID            uuid.UUID
    OwnerID       uuid.UUID
    Name          string
    Size          int64
    Downloaded    int64
    UploadedByte  int64
    UploadedChunk int64
    Done          bool
    AccessAt      time.Time
    mu            sync.Mutex
}

var log *logger.AggregatedLogger
var userCache map[string]*UserWithExpired
var fileCache map[string]*FileWithExpired
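
// Note: both maps are read and written by request handlers and by the
// cleanup goroutines below without a map-level lock; only individual
// entries carry a mutex, so concurrent map access is a known race in
// this revision.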
func init() {
    log = logger.Logger()
    userCache = make(map[string]*UserWithExpired)
    fileCache = make(map[string]*FileWithExpired)

    ticker := time.NewTicker(time.Minute)
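    // Both cleanup goroutines receive from this one ticker channel. Each
    // tick is delivered to a single receiver, so the user sweep and the
    // file sweep alternate rather than each running every minute.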
    go func() {
        for {
            <-ticker.C
            currentTime := time.Now()
            cacheClean := 0
            cleanID := utils.GenerateRandomString(10)
            log.Info(fmt.Sprintf("Cache cleanup [user] [%s] initiated at %02d:%02d:%02d", cleanID, currentTime.Hour(), currentTime.Minute(), currentTime.Second()))

            for _, user := range userCache {
                user.mu.Lock()
                if currentTime.Sub(user.AccessAt) > time.Hour*8 {
                    delete(userCache, user.Email)
                    cacheClean++
                }
                user.mu.Unlock()
            }

            log.Info(fmt.Sprintf("Cache cleanup [user] [%s] completed: %d entries removed. Finished in %s", cleanID, cacheClean, time.Since(currentTime)))
        }
    }()

    go func() {
        for {
            <-ticker.C
            currentTime := time.Now()
            cacheClean := 0
            cleanID := utils.GenerateRandomString(10)
            log.Info(fmt.Sprintf("Cache cleanup [files] [%s] initiated at %02d:%02d:%02d", cleanID, currentTime.Hour(), currentTime.Minute(), currentTime.Second()))

            for _, file := range fileCache {
                file.mu.Lock()
                if currentTime.Sub(file.AccessAt) > time.Minute*10 {
                    db.DB.UpdateUploadedByte(file.UploadedByte, file.ID.String())
                    db.DB.UpdateUploadedChunk(file.UploadedChunk, file.ID.String())
                    delete(fileCache, file.ID.String())
                    cacheClean++
                }
                file.mu.Unlock()
            }

            log.Info(fmt.Sprintf("Cache cleanup [files] [%s] completed: %d entries removed. Finished in %s", cleanID, cacheClean, time.Since(currentTime)))
        }
    }()
}
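
// GetUser is a read-through lookup: it returns the cached entry when
// present and otherwise fetches the user from the database and memoizes
// the result. Note that AccessAt is not refreshed on cache hits, so a
// frequently used entry still expires 8 hours after it was first cached.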
func GetUser(email string) (*UserWithExpired, error) {
    if user, ok := userCache[email]; ok {
        return user, nil
    }

    userData, err := db.DB.GetUser(email)
    if err != nil {
        return nil, err
    }

    userCache[email] = &UserWithExpired{
        UserID:   userData.UserID,
        Username: userData.Username,
        Email:    userData.Email,
        Password: userData.Password,
        AccessAt: time.Now(),
    }
    return userCache[email], nil
}

func DeleteUser(email string) {
    if user, ok := userCache[email]; ok {
        user.mu.Lock()
        defer user.mu.Unlock()
        delete(userCache, email)
    }
}
func GetFile(id string) (*FileWithExpired, error) {
    if file, ok := fileCache[id]; ok {
        file.AccessAt = time.Now()
        return file, nil
    }

    uploadData, err := db.DB.GetFile(id)
    if err != nil {
        return nil, err
    }

    fileCache[id] = &FileWithExpired{
        ID:            uploadData.ID,
        OwnerID:       uploadData.OwnerID,
        Name:          uploadData.Name,
        Size:          uploadData.Size,
        Downloaded:    uploadData.Downloaded,
        UploadedByte:  uploadData.UploadedByte,
        UploadedChunk: uploadData.UploadedChunk,
        Done:          uploadData.Done,
        AccessAt:      time.Now(),
    }
    return fileCache[id], nil
}
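
// UpdateProgress records the latest chunk index and byte count in memory
// only; the values are flushed to the database when the cleanup sweep
// evicts the entry or when FinalizeFileUpload runs (write-back).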
func (file *FileWithExpired) UpdateProgress(index int64, size int64) {
    file.UploadedChunk = index
    file.UploadedByte = size
    file.AccessAt = time.Now()
}

func GetUserFile(name, ownerID string) (*FileWithExpired, error) {
    fileData, err := db.DB.GetUserFile(name, ownerID)
    if err != nil {
        return nil, err
    }

    file, err := GetFile(fileData.ID.String())
    if err != nil {
        return nil, err
    }
    return file, nil
}
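
// FinalizeFileUpload flushes the in-memory progress counters, marks the
// file as done in the database, and evicts the cache entry.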
func (file *FileWithExpired) FinalizeFileUpload() {
    db.DB.UpdateUploadedByte(file.UploadedByte, file.ID.String())
    db.DB.UpdateUploadedChunk(file.UploadedChunk, file.ID.String())
    db.DB.FinalizeFileUpload(file.ID.String())
    delete(fileCache, file.ID.String())
}
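
As an aside, the map race flagged in the comments above could be closed with a package-level sync.RWMutex guarding every map operation. A minimal sketch, under the assumption that all access goes through helpers like these (getUserLocked and setUserLocked are illustrative names, not part of this commit):

var cacheMu sync.RWMutex

// getUserLocked reads the user cache under a shared lock.
func getUserLocked(email string) (*UserWithExpired, bool) {
    cacheMu.RLock()
    defer cacheMu.RUnlock()
    user, ok := userCache[email]
    return user, ok
}

// setUserLocked replaces a user entry under an exclusive lock.
func setUserLocked(user *UserWithExpired) {
    cacheMu.Lock()
    defer cacheMu.Unlock()
    userCache[user.Email] = user
}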

cache/user.go (deleted)
@@ -1,79 +0,0 @@
package cache

import (
    "fmt"
    "github.com/fossyy/filekeeper/db"
    "github.com/fossyy/filekeeper/logger"
    "github.com/fossyy/filekeeper/utils"
    "github.com/google/uuid"
    "sync"
    "time"
)

type UserWithExpired struct {
    UserID   uuid.UUID
    Username string
    Email    string
    Password string
    AccessAt time.Time
    mu       sync.Mutex
}

var log *logger.AggregatedLogger
var userCache map[string]*UserWithExpired

func init() {
    log = logger.Logger()
    userCache = make(map[string]*UserWithExpired)

    ticker := time.NewTicker(time.Minute)
    go func() {
        for {
            <-ticker.C
            currentTime := time.Now()
            cacheClean := 0
            cleanID := utils.GenerateRandomString(10)
            log.Info(fmt.Sprintf("Cache cleanup [user] [%s] initiated at %02d:%02d:%02d", cleanID, currentTime.Hour(), currentTime.Minute(), currentTime.Second()))

            for _, user := range userCache {
                user.mu.Lock()
                if currentTime.Sub(user.AccessAt) > time.Hour*8 {
                    delete(userCache, user.Email)
                    cacheClean++
                }
                user.mu.Unlock()
            }

            log.Info(fmt.Sprintf("Cache cleanup [user] [%s] completed: %d entries removed. Finished at %s", cleanID, cacheClean, time.Since(currentTime)))
        }
    }()
}

func GetUser(email string) (*UserWithExpired, error) {
    if user, ok := userCache[email]; ok {
        return user, nil
    }

    userData, err := db.DB.GetUser(email)
    if err != nil {
        return nil, err
    }

    userCache[email] = &UserWithExpired{
        UserID:   userData.UserID,
        Username: userData.Username,
        Email:    userData.Email,
        Password: userData.Password,
        AccessAt: time.Now(),
    }
    return userCache[email], nil
}

func DeleteUser(email string) {
    userCache[email].mu.Lock()
    defer userCache[email].mu.Unlock()
    delete(userCache, email)
}
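
The deleted cache/user.go above is carried over essentially verbatim into cache/cache.go, which adds the file cache alongside it.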

@@ -41,9 +41,8 @@ type Database interface {
    GetUserFile(name string, ownerID string) (*models.File, error)
    GetFiles(ownerID string) ([]*models.File, error)
-   CreateUploadInfo(info models.FilesUploaded) error
-   GetUploadInfo(uploadID string) (*models.FilesUploaded, error)
-   UpdateUpdateIndex(index int, fileID string)
+   UpdateUploadedByte(index int64, fileID string)
+   UpdateUploadedChunk(index int64, fileID string)
    FinalizeFileUpload(fileID string)
}
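
With the files_uploadeds bookkeeping gone, the interface loses CreateUploadInfo, GetUploadInfo, and UpdateUpdateIndex and gains two direct column updates against files; the MySQL and Postgres backends below change identically.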
@@ -210,32 +210,19 @@ func (db *mySQLdb) GetFiles(ownerID string) ([]*models.File, error) {
    return files, err
}

-// CreateUploadInfo It's not optimal, but it's okay for now. Consider implementing caching instead of pushing all updates to the database for better performance in the future.
-func (db *mySQLdb) CreateUploadInfo(info models.FilesUploaded) error {
-   err := db.DB.Create(info).Error
-   if err != nil {
-       return err
-   }
-   return nil
-}
-
-func (db *mySQLdb) GetUploadInfo(fileID string) (*models.FilesUploaded, error) {
-   var info models.FilesUploaded
-   err := db.DB.Table("files_uploadeds").Where("file_id = ?", fileID).First(&info).Error
-   if err != nil {
-       return nil, err
-   }
-   return &info, nil
-}
-
-func (db *mySQLdb) UpdateUpdateIndex(index int, fileID string) {
-   db.DB.Table("files_uploadeds").Where("file_id = ?", fileID).Updates(map[string]interface{}{
-       "Uploaded": index,
-   })
-}
+func (db *mySQLdb) UpdateUploadedByte(byte int64, fileID string) {
+   db.DB.Table("files").Where("id = ?", fileID).Updates(map[string]interface{}{
+       "Uploaded_byte": byte,
+   })
+}
+
+func (db *mySQLdb) UpdateUploadedChunk(index int64, fileID string) {
+   db.DB.Table("files").Where("id = ?", fileID).Updates(map[string]interface{}{
+       "Uploaded_chunk": index,
+   })
+}

func (db *mySQLdb) FinalizeFileUpload(fileID string) {
-   db.DB.Table("files_uploadeds").Where("file_id = ?", fileID).Updates(map[string]interface{}{
+   db.DB.Table("files").Where("id = ?", fileID).Updates(map[string]interface{}{
        "Done": true,
    })
}
@@ -313,32 +300,19 @@ func (db *postgresDB) GetFiles(ownerID string) ([]*models.File, error) {
    return files, err
}

-// CreateUploadInfo It's not optimal, but it's okay for now. Consider implementing caching instead of pushing all updates to the database for better performance in the future.
-func (db *postgresDB) CreateUploadInfo(info models.FilesUploaded) error {
-   err := db.DB.Create(info).Error
-   if err != nil {
-       return err
-   }
-   return nil
-}
-
-func (db *postgresDB) GetUploadInfo(fileID string) (*models.FilesUploaded, error) {
-   var info models.FilesUploaded
-   err := db.DB.Table("files_uploadeds").Where("file_id = $1", fileID).First(&info).Error
-   if err != nil {
-       return nil, err
-   }
-   return &info, nil
-}
-
-func (db *postgresDB) UpdateUpdateIndex(index int, fileID string) {
-   db.DB.Table("files_uploadeds").Where("file_id = $1", fileID).Updates(map[string]interface{}{
-       "Uploaded": index,
-   })
-}
+func (db *postgresDB) UpdateUploadedByte(byte int64, fileID string) {
+   db.DB.Table("files").Where("id = $1", fileID).Updates(map[string]interface{}{
+       "Uploaded_byte": byte,
+   })
+}
+
+func (db *postgresDB) UpdateUploadedChunk(index int64, fileID string) {
+   db.DB.Table("files").Where("id = $1", fileID).Updates(map[string]interface{}{
+       "Uploaded_chunk": index,
+   })
+}

func (db *postgresDB) FinalizeFileUpload(fileID string) {
-   db.DB.Table("files_uploadeds").Where("file_id = $1", fileID).Updates(map[string]interface{}{
+   db.DB.Table("files").Where("id = $1", fileID).Updates(map[string]interface{}{
        "Done": true,
    })
}
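
A small style point in the new update methods: the first parameter of UpdateUploadedByte is named byte, which shadows Go's built-in byte type. It compiles, but a name like size would read better.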

@@ -3,6 +3,7 @@ package initialisation
import (
    "encoding/json"
    "errors"
+   "github.com/fossyy/filekeeper/cache"
    "io"
    "net/http"
    "os"
@@ -37,7 +39,7 @@ func POST(w http.ResponseWriter, r *http.Request) {
        return
    }

-   fileData, err := db.DB.GetUserFile(fileInfo.Name, userSession.UserID.String())
+   fileData, err := cache.GetUserFile(fileInfo.Name, userSession.UserID.String())
    if err != nil {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            upload, err := handleNewUpload(userSession, fileInfo)
@@ -52,27 +54,22 @@ func POST(w http.ResponseWriter, r *http.Request) {
        return
    }

-   info, err := db.DB.GetUploadInfo(fileData.ID.String())
-   if err != nil {
-       log.Error(err.Error())
-       return
-   }
-
-   if info.Done {
+   if fileData.Done {
        respondJSON(w, map[string]bool{"Done": true})
        return
    }

-   respondJSON(w, info)
+   respondJSON(w, fileData)
}

-func handleNewUpload(user types.User, file types.FileInfo) (models.FilesUploaded, error) {
+func handleNewUpload(user types.User, file types.FileInfo) (models.File, error) {
    uploadDir := "uploads"
    if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
        log.Error(err.Error())
        err := os.Mkdir(uploadDir, os.ModePerm)
        if err != nil {
            log.Error(err.Error())
-           return models.FilesUploaded{}, err
+           return models.File{}, err
        }
    }
@@ -83,13 +80,13 @@ func handleNewUpload(user types.User, file types.FileInfo) (models.FilesUploaded
    basePath := filepath.Join(currentDir, uploadDir)
    saveFolder := filepath.Join(basePath, ownerID.String(), fileID.String())

    if filepath.Dir(saveFolder) != filepath.Join(basePath, ownerID.String()) {
-       return models.FilesUploaded{}, errors.New("invalid path")
+       return models.File{}, errors.New("invalid path")
    }

    err := os.MkdirAll(saveFolder, os.ModePerm)
    if err != nil {
        log.Error(err.Error())
-       return models.FilesUploaded{}, err
+       return models.File{}, err
    }
@@ -98,30 +95,18 @@ func handleNewUpload(user types.User, file types.FileInfo) (models.FilesUploaded
    newFile := models.File{
        Name:       file.Name,
        Size:       file.Size,
        Downloaded: 0,
+       UploadedByte:  0,
+       UploadedChunk: -1,
+       Done:          false,
    }
    err = db.DB.CreateFile(&newFile)
    if err != nil {
        log.Error(err.Error())
-       return models.FilesUploaded{}, err
+       return models.File{}, err
    }

-   filesUploaded := models.FilesUploaded{
-       UploadID: uuid.New(),
-       FileID:   fileID,
-       OwnerID:  ownerID,
-       Name:     file.Name,
-       Size:     file.Size,
-       Uploaded: -1,
-       Done:     false,
-   }
-
-   err = db.DB.CreateUploadInfo(filesUploaded)
-   if err != nil {
-       log.Error(err.Error())
-       return models.FilesUploaded{}, err
-   }
-
-   return filesUploaded, nil
+   return newFile, nil
}
func respondJSON(w http.ResponseWriter, data interface{}) {

@@ -1,22 +1,18 @@
package uploadHandler

import (
-   "errors"
-   "github.com/fossyy/filekeeper/db"
+   "github.com/fossyy/filekeeper/cache"
+   "github.com/fossyy/filekeeper/logger"
    "github.com/fossyy/filekeeper/types"
+   filesView "github.com/fossyy/filekeeper/view/upload"
    "io"
    "net/http"
    "os"
    "path/filepath"
    "strconv"
-   "sync"
-
-   "github.com/fossyy/filekeeper/logger"
-   filesView "github.com/fossyy/filekeeper/view/upload"
)

var log *logger.AggregatedLogger
-var mu sync.Mutex

func init() {
    log = logger.Logger()
@@ -25,7 +21,7 @@ func init() {
func GET(w http.ResponseWriter, r *http.Request) {
    component := filesView.Main("upload page")
    if err := component.Render(r.Context(), w); err != nil {
-       handleError(w, err, http.StatusInternalServerError)
+       http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
}
@@ -33,32 +29,31 @@ func GET(w http.ResponseWriter, r *http.Request) {
func POST(w http.ResponseWriter, r *http.Request) {
    fileID := r.PathValue("id")
    if err := r.ParseMultipartForm(32 << 20); err != nil {
-       handleError(w, err, http.StatusInternalServerError)
+       http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    userSession := r.Context().Value("user").(types.User)

-   if r.FormValue("done") == "true" {
-       db.DB.FinalizeFileUpload(fileID)
-       return
-   }
-
    uploadDir := "uploads"
-   if err := createUploadDirectory(uploadDir); err != nil {
-       handleError(w, err, http.StatusInternalServerError)
-       return
+   if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
+       if err := os.Mkdir(uploadDir, os.ModePerm); err != nil {
+           log.Error("error creating upload directory: " + err.Error())
+           http.Error(w, err.Error(), http.StatusInternalServerError)
+           return
+       }
    }

-   file, err := db.DB.GetUploadInfo(fileID)
+   file, err := cache.GetFile(fileID)
    if err != nil {
+       log.Error("error getting upload info: " + err.Error())
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    currentDir, _ := os.Getwd()
    basePath := filepath.Join(currentDir, uploadDir)
-   saveFolder := filepath.Join(basePath, userSession.UserID.String(), file.FileID.String())
+   saveFolder := filepath.Join(basePath, userSession.UserID.String(), file.ID.String())

    if filepath.Dir(saveFolder) != filepath.Join(basePath, userSession.UserID.String()) {
        log.Error("invalid path")
@@ -66,49 +61,39 @@ func POST(w http.ResponseWriter, r *http.Request) {
        return
    }

-   fileByte, _, err := r.FormFile("chunk")
+   fileByte, fileHeader, err := r.FormFile("chunk")
    if err != nil {
-       handleError(w, err, http.StatusInternalServerError)
+       log.Error("error reading chunk from request: " + err.Error())
+       http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer fileByte.Close()

-   dst, err := os.OpenFile(filepath.Join(saveFolder, file.Name), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
-   if err != nil {
-       handleError(w, err, http.StatusInternalServerError)
-       return
-   }
-   defer dst.Close()
-
-   if _, err := io.Copy(dst, fileByte); err != nil {
-       handleError(w, err, http.StatusInternalServerError)
-       return
-   }
-
    rawIndex := r.FormValue("index")
    index, err := strconv.Atoi(rawIndex)
    if err != nil {
        return
    }
-   db.DB.UpdateUpdateIndex(index, fileID)
-}
+   file.UpdateProgress(int64(index), file.UploadedByte+int64(fileHeader.Size))

-func createUploadDirectory(uploadDir string) error {
-   if _, err := os.Stat(uploadDir); os.IsNotExist(err) {
-       if err := os.Mkdir(uploadDir, os.ModePerm); err != nil {
-           return err
-       }
-   }
-   return nil
-}
+   dst, err := os.OpenFile(filepath.Join(saveFolder, file.Name), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+   if err != nil {
+       log.Error("error opening destination file: " + err.Error())
+       http.Error(w, err.Error(), http.StatusInternalServerError)
+       return
+   }
+   defer dst.Close()

-func handleCookieError(w http.ResponseWriter, r *http.Request, err error) {
-   if errors.Is(err, http.ErrNoCookie) {
-       http.Redirect(w, r, "/signin", http.StatusSeeOther)
-       return
-   }
-   handleError(w, err, http.StatusInternalServerError)
-}
+   if _, err := io.Copy(dst, fileByte); err != nil {
+       log.Error("error copying chunk to destination: " + err.Error())
+       http.Error(w, err.Error(), http.StatusInternalServerError)
+       return
+   }

-func handleError(w http.ResponseWriter, err error, status int) {
-   http.Error(w, err.Error(), status)
-   log.Error(err.Error())
+   if file.UploadedByte >= file.Size {
+       file.FinalizeFileUpload()
+   }
}
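
Worth noting in the rewritten handler: file.UpdateProgress runs before the chunk is copied to disk, and the byte count comes from fileHeader.Size rather than from the bytes io.Copy actually wrote, so a failed copy leaves the cached progress ahead of the file on disk until the upload is retried.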

@@ -32,7 +32,7 @@ async function handleFile(file){
    if (responseData.Done === false) {
        addNewUploadElement(file)
        const fileChunks = await splitFile(file, chunkSize);
-        await uploadChunks(file.name,file.size, fileChunks, responseData.Uploaded, responseData.FileID);
+        await uploadChunks(file.name,file.size, fileChunks, responseData.UploadedChunk, responseData.ID);
    } else {
        alert("file already uploaded")
    }
@@ -125,10 +125,10 @@ async function splitFile(file, chunkSize) {
async function uploadChunks(name, size, chunks, uploadedChunk= -1, FileID) {
    let byteUploaded = 0
-    var progress1 = document.getElementById(`progress-${name}-1`);
-    var progress2 = document.getElementById(`progress-${name}-2`);
-    var progress3 = document.getElementById(`progress-${name}-3`);
-    var progress4 = document.getElementById(`progress-${name}-4`);
+    let progress1 = document.getElementById(`progress-${name}-1`);
+    let progress2 = document.getElementById(`progress-${name}-2`);
+    let progress3 = document.getElementById(`progress-${name}-3`);
+    let progress4 = document.getElementById(`progress-${name}-4`);
    for (let index = 0; index < chunks.length; index++) {
        const percentComplete = Math.round((index + 1) / chunks.length * 100);
        const chunk = chunks[index];
@@ -160,14 +161,4 @@ async function uploadChunks(name, size, chunks, uploadedChunk= -1, FileID) {
            byteUploaded += chunk.size
        }
    }
-
-    const formData = new FormData();
-    formData.append('name', name);
-    formData.append('done', true);
-
-    return fetch(`/upload/${FileID}`, {
-        method: 'POST',
-        body: formData
-    });
}
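
With finalization moved server-side (the handler calls FinalizeFileUpload once UploadedByte reaches the file size), the client no longer sends a trailing done=true request.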

@@ -1,3 +1,2 @@
CREATE TABLE IF NOT EXISTS users (user_id VARCHAR(255) PRIMARY KEY NOT NULL,username VARCHAR(255) UNIQUE NOT NULL,email VARCHAR(255) UNIQUE NOT NULL,password TEXT NOT NULL);
-CREATE TABLE IF NOT EXISTS files (id VARCHAR(255) PRIMARY KEY NOT NULL,owner_id VARCHAR(255) NOT NULL,name TEXT NOT NULL,size BIGINT NOT NULL,downloaded BIGINT NOT NULL,FOREIGN KEY (owner_id) REFERENCES users(user_id));
-CREATE TABLE IF NOT EXISTS files_uploadeds (upload_id VARCHAR(255) PRIMARY KEY NOT NULL,file_id VARCHAR(255) NOT NULL,owner_id VARCHAR(255) NOT NULL,name TEXT NOT NULL,size INT NOT NULL,uploaded INT NOT NULL DEFAULT 0,done BOOLEAN NOT NULL DEFAULT FALSE,FOREIGN KEY (file_id) REFERENCES files(id),FOREIGN KEY (owner_id) REFERENCES users(user_id));
+CREATE TABLE IF NOT EXISTS files (id VARCHAR(255) PRIMARY KEY NOT NULL,owner_id VARCHAR(255) NOT NULL,name TEXT NOT NULL,size BIGINT NOT NULL,downloaded BIGINT NOT NULL,uploaded_byte BIGINT NOT NULL DEFAULT 0, uploaded_chunk BIGINT NOT NULL DEFAULT -1,done BOOLEAN NOT NULL DEFAULT FALSE,FOREIGN KEY (owner_id) REFERENCES users(user_id));

@@ -13,16 +13,9 @@ type File struct {
    ID      uuid.UUID `gorm:"primaryKey;not null;unique"`
    OwnerID uuid.UUID `gorm:"not null"`
    Name    string    `gorm:"not null"`
-   Size       int `gorm:"not null"`
-   Downloaded int `gorm:"not null;default=0"`
-}
-
-type FilesUploaded struct {
-   UploadID uuid.UUID `gorm:"primaryKey;not null;unique"`
-   FileID   uuid.UUID `gorm:"not null"`
-   OwnerID  uuid.UUID `gorm:"not null"`
-   Name     string    `gorm:"not null"`
-   Size     int       `gorm:"not null"`
-   Uploaded int       `gorm:"not null;default=0"`
-   Done     bool      `gorm:"not null;default=false"`
+   Size          int64 `gorm:"not null"`
+   Downloaded    int64 `gorm:"not null;default=0"`
+   UploadedByte  int64 `gorm:"not null;default=0"`
+   UploadedChunk int64 `gorm:"not null;default=0"`
+   Done          bool  `gorm:"not null;default=false"`
}
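
One inconsistency worth flagging: the gorm tag gives UploadedChunk a default of 0, while the SQL schema above defaults uploaded_chunk to -1, the sentinel the upload code relies on. Note also that gorm struct tags expect default:0 rather than default=0, so these tag defaults are likely ignored in practice.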

@@ -18,20 +18,13 @@ type User struct {
type FileInfo struct {
    Name  string `json:"name"`
-   Size  int    `json:"size"`
-   Chunk int    `json:"chunk"`
-}
-
-type FileInfoUploaded struct {
-   Name          string `json:"name"`
-   Size          int    `json:"size"`
-   Chunk         int    `json:"chunk"`
-   UploadedChunk int    `json:"uploaded_chunk"`
+   Size  int64  `json:"size"`
+   Chunk int64  `json:"chunk"`
}

type FileData struct {
    ID         string
    Name       string
    Size       string
-   Downloaded int
+   Downloaded int64
}

@@ -97,7 +97,7 @@ func ValidatePassword(password string) bool {
    return hasSymbol && hasNumber >= 3 && hasUppercase
}

-func ConvertFileSize(byte int) string {
+func ConvertFileSize(byte int64) string {
    if byte < 1024 {
        return fmt.Sprintf("%d B", byte)
    } else if byte < 1024*1024 {