fix: share local hls file cleanup between s3 and filesystem providers. Fixes #3522 (#3531)

This commit is contained in:
Gabe Kangas 2024-01-22 20:26:36 -08:00 committed by GitHub
parent 8a90e86c5b
commit 841c300431
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 51 additions and 42 deletions

View File

@ -5,10 +5,8 @@ import (
"path/filepath"
"sort"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/owncast/owncast/config"
"github.com/owncast/owncast/core/data"
)
@ -62,36 +60,12 @@ func (s *LocalStorage) Save(filePath string, retryCount int) (string, error) {
return filePath, nil
}
// Cleanup will remove old files from the storage provider.
// The actual on-disk file removal is shared with the S3 provider via
// localCleanup; this method only decides how many segments to retain.
func (s *LocalStorage) Cleanup() error {
	// Determine how many files we should keep on disk.
	maxNumber := data.GetStreamLatencyLevel().SegmentCount
	// Keep a safety margin beyond the segment count so players slightly
	// behind the live edge can still fetch recent segments.
	buffer := 10
	return localCleanup(maxNumber + buffer)
}
func getAllFilesRecursive(baseDirectory string) (map[string][]os.FileInfo, error) {

View File

@ -0,0 +1,39 @@
package storageproviders
import (
"os"
"path/filepath"
"github.com/owncast/owncast/config"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// localCleanup removes old HLS segment files from the local filesystem,
// keeping at most maxNumber files per video variant directory.
// It is shared by the filesystem and S3 storage providers.
func localCleanup(maxNumber int) error {
	baseDirectory := config.HLSStoragePath

	files, err := getAllFilesRecursive(baseDirectory)
	if err != nil {
		return errors.Wrap(err, "unable to find old video files for cleanup")
	}

	// Delete old private HLS files on disk, one variant directory at a time.
	for directory := range files {
		// Renamed from the original shadowing `files := files[directory]`
		// for clarity.
		variantFiles := files[directory]
		if len(variantFiles) < maxNumber {
			continue
		}

		// Everything past index maxNumber is eligible for deletion.
		// NOTE(review): assumes getAllFilesRecursive returns files ordered
		// newest-first — confirm against its sort logic.
		filesToDelete := variantFiles[maxNumber:]
		log.Traceln("Deleting", len(filesToDelete), "old files from", baseDirectory, "for video variant", directory)

		for _, file := range filesToDelete {
			fileToDelete := filepath.Join(baseDirectory, directory, file.Name())
			if err := os.Remove(fileToDelete); err != nil {
				return errors.Wrap(err, "unable to delete old video files")
			}
		}
	}

	return nil
}

View File

@ -202,19 +202,23 @@ func (s *S3Storage) Save(filePath string, retryCount int) (string, error) {
return s.Save(filePath, retryCount+1)
}
// Upload failure. Remove the local file.
s.removeLocalFile(filePath)
return "", fmt.Errorf("Giving up uploading %s to object storage %s", filePath, s.s3Endpoint)
}
// Upload success. Remove the local file.
s.removeLocalFile(filePath)
return response.Location, nil
}
// Cleanup will fire the different cleanup tasks required.
func (s *S3Storage) Cleanup() error {
	err := s.RemoteCleanup()
	if err != nil {
		// A remote failure is logged but does not block local cleanup.
		log.Errorln(err)
	}

	// Only a small number of recently written segments need to remain on
	// disk while uploads to object storage are in flight.
	return localCleanup(4)
}
// RemoteCleanup will remove old files from the remote storage provider.
func (s *S3Storage) RemoteCleanup() error {
// Determine how many files we should keep on S3 storage
maxNumber := data.GetStreamLatencyLevel().SegmentCount
buffer := 20
@ -276,14 +280,6 @@ func (s *S3Storage) getDeletableVideoSegmentsWithOffset(offset int) ([]s3object,
return objectsToDelete, nil
}
// removeLocalFile deletes the given file from local disk, logging any
// failure instead of returning it — removal is treated as best-effort.
func (s *S3Storage) removeLocalFile(filePath string) {
	// Normalize the path (strips redundant separators and ".." elements)
	// before attempting removal.
	target := filepath.Clean(filePath)
	err := os.Remove(target)
	if err != nil {
		log.Errorln(err)
	}
}
func (s *S3Storage) deleteObjects(objects []s3object) {
keys := make([]*s3.ObjectIdentifier, len(objects))
for i, object := range objects {