From 841c3004314b234b4c1ab17ba67100427c69a35f Mon Sep 17 00:00:00 2001 From: Gabe Kangas Date: Mon, 22 Jan 2024 20:26:36 -0800 Subject: [PATCH] fix: share local hls file cleanup between s3 and filesystem providers. Fixes #3522 (#3531) --- core/storageproviders/local.go | 30 ++------------------- core/storageproviders/localCleanup.go | 39 +++++++++++++++++++++++++++ core/storageproviders/s3Storage.go | 24 +++++++---------- 3 files changed, 51 insertions(+), 42 deletions(-) create mode 100644 core/storageproviders/localCleanup.go diff --git a/core/storageproviders/local.go b/core/storageproviders/local.go index 65bcaa907..b7baabb72 100644 --- a/core/storageproviders/local.go +++ b/core/storageproviders/local.go @@ -5,10 +5,8 @@ import ( "path/filepath" "sort" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "github.com/owncast/owncast/config" "github.com/owncast/owncast/core/data" ) @@ -62,36 +60,12 @@ func (s *LocalStorage) Save(filePath string, retryCount int) (string, error) { return filePath, nil } +// Cleanup will remove old files from the storage provider. 
func (s *LocalStorage) Cleanup() error { // Determine how many files we should keep on disk maxNumber := data.GetStreamLatencyLevel().SegmentCount buffer := 10 - baseDirectory := config.HLSStoragePath - - files, err := getAllFilesRecursive(baseDirectory) - if err != nil { - return errors.Wrap(err, "unable find old video files for cleanup") - } - - // Delete old private HLS files on disk - for directory := range files { - files := files[directory] - if len(files) < maxNumber+buffer { - continue - } - - filesToDelete := files[maxNumber+buffer:] - log.Traceln("Deleting", len(filesToDelete), "old files from", baseDirectory, "for video variant", directory) - - for _, file := range filesToDelete { - fileToDelete := filepath.Join(baseDirectory, directory, file.Name()) - err := os.Remove(fileToDelete) - if err != nil { - return errors.Wrap(err, "unable to delete old video files") - } - } - } - return nil + return localCleanup(maxNumber + buffer) } func getAllFilesRecursive(baseDirectory string) (map[string][]os.FileInfo, error) { diff --git a/core/storageproviders/localCleanup.go b/core/storageproviders/localCleanup.go new file mode 100644 index 000000000..7e655d4d0 --- /dev/null +++ b/core/storageproviders/localCleanup.go @@ -0,0 +1,39 @@ +package storageproviders + +import ( + "os" + "path/filepath" + + "github.com/owncast/owncast/config" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +func localCleanup(maxNumber int) error { + baseDirectory := config.HLSStoragePath + + files, err := getAllFilesRecursive(baseDirectory) + if err != nil { + return errors.Wrap(err, "unable to find old video files for cleanup") + } + + // Delete old private HLS files on disk + for directory := range files { + files := files[directory] + if len(files) < maxNumber { + continue + } + + filesToDelete := files[maxNumber:] + log.Traceln("Deleting", len(filesToDelete), "old files from", baseDirectory, "for video variant", directory) + + for _, file := range filesToDelete { + 
fileToDelete := filepath.Join(baseDirectory, directory, file.Name()) + err := os.Remove(fileToDelete) + if err != nil { + return errors.Wrap(err, "unable to delete old video files") + } + } + } + return nil +} diff --git a/core/storageproviders/s3Storage.go b/core/storageproviders/s3Storage.go index b5f8b330f..c74c025e7 100644 --- a/core/storageproviders/s3Storage.go +++ b/core/storageproviders/s3Storage.go @@ -202,19 +202,23 @@ func (s *S3Storage) Save(filePath string, retryCount int) (string, error) { return s.Save(filePath, retryCount+1) } - // Upload failure. Remove the local file. - s.removeLocalFile(filePath) - return "", fmt.Errorf("Giving up uploading %s to object storage %s", filePath, s.s3Endpoint) } - // Upload success. Remove the local file. - s.removeLocalFile(filePath) - return response.Location, nil } +// Cleanup will fire the different cleanup tasks required. func (s *S3Storage) Cleanup() error { + if err := s.RemoteCleanup(); err != nil { + log.Errorln(err) + } + + return localCleanup(4) +} + +// RemoteCleanup will remove old files from the remote storage provider. +func (s *S3Storage) RemoteCleanup() error { // Determine how many files we should keep on S3 storage maxNumber := data.GetStreamLatencyLevel().SegmentCount buffer := 20 @@ -276,14 +280,6 @@ func (s *S3Storage) getDeletableVideoSegmentsWithOffset(offset int) ([]s3object, return objectsToDelete, nil } -func (s *S3Storage) removeLocalFile(filePath string) { - cleanFilepath := filepath.Clean(filePath) - - if err := os.Remove(cleanFilepath); err != nil { - log.Errorln(err) - } -} - func (s *S3Storage) deleteObjects(objects []s3object) { keys := make([]*s3.ObjectIdentifier, len(objects)) for i, object := range objects {