chore(go): run betteralign and gofumpt on codebase
parent a31179b604
commit 8e79e2acfa
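gofumpt is a stricter gofmt: among other rewrites it spells octal literals with the 0o prefix, groups adjacent top-level var and const declarations into a single block, and simplifies `var x = v` inside functions to `x := v`. betteralign reorders struct fields to reduce alignment padding, which is why the hunks below move whole blocks of field declarations and why positional struct literals such as ConfigEntry{key, value} have their arguments reordered to match. The snippet below is only an illustration of the style these tools produce; the names are invented and it is not code from this commit.

// Illustrative only: invented names, not code from this repository.
package example

import "errors"

// gofumpt joins adjacent top-level declarations into one grouped block
// (previously two separate `var` lines).
var (
	lastMessage = ""
	retryCount  = 0
)

func checkMode(mode uint32) error {
	// gofumpt rewrites legacy octal literals such as 0111 as 0o111.
	if mode&0o111 == 0 {
		return errors.New("not executable")
	}
	return nil
}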
@@ -29,8 +29,8 @@ func VerifyFFMpegPath(path string) error {
	}

	mode := stat.Mode()
	//source: https://stackoverflow.com/a/60128480
	if mode&0111 == 0 {
	// source: https://stackoverflow.com/a/60128480
	if mode&0o111 == 0 {
		return errors.New("ffmpeg path is not executable")
	}
@@ -17,25 +17,25 @@ import (

type webConfigResponse struct {
	AppearanceVariables map[string]string `json:"appearanceVariables"`
	Notifications notificationsConfigResponse `json:"notifications"`
	Name string `json:"name"`
	CustomStyles string `json:"customStyles"`
	Summary string `json:"summary"`
	StreamTitle string `json:"streamTitle,omitempty"` // What's going on with the current stream
	OfflineMessage string `json:"offlineMessage"`
	Logo string `json:"logo"`
	Version string `json:"version"`
	SocketHostOverride string `json:"socketHostOverride,omitempty"`
	ExtraPageContent string `json:"extraPageContent"`
	StreamTitle string `json:"streamTitle,omitempty"` // What's going on with the current stream
	Name string `json:"name"`
	Federation federationConfigResponse `json:"federation"`
	Summary string `json:"summary"`
	Tags []string `json:"tags"`
	SocialHandles []models.SocialHandle `json:"socialHandles"`
	ExternalActions []models.ExternalAction `json:"externalActions"`
	Tags []string `json:"tags"`
	Notifications notificationsConfigResponse `json:"notifications"`
	Federation federationConfigResponse `json:"federation"`
	MaxSocketPayloadSize int `json:"maxSocketPayloadSize"`
	HideViewerCount bool `json:"hideViewerCount"`
	ChatDisabled bool `json:"chatDisabled"`
	NSFW bool `json:"nsfw"`
	Authentication authenticationConfigResponse `json:"authentication"`
	HideViewerCount bool `json:"hideViewerCount"`
}

type federationConfigResponse struct {
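The field reorderings in structs like webConfigResponse come from betteralign, which sorts fields so the compiler inserts less alignment padding. A minimal sketch of the effect, using invented toy types rather than the Owncast structs:

// Toy types, not repository code: field order alone changes struct size.
package main

import (
	"fmt"
	"unsafe"
)

type padded struct {
	a bool   // 1 byte + 7 bytes padding (next field needs 8-byte alignment)
	b string // 16 bytes
	c bool   // 1 byte + 7 bytes padding to round the struct size up
}

type packed struct {
	b string // 16 bytes
	a bool   // the two bools now share one 8-byte slot
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(packed{})) // 32 24 on 64-bit platforms
}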
@@ -4,6 +4,6 @@ import "github.com/owncast/owncast/core/user"

// ConnectedClientInfo represents the information about a connected client.
type ConnectedClientInfo struct {
	Event
	User *user.User `json:"user"`
	Event
}
@@ -35,14 +35,14 @@ type Server struct {
	// unregister requests from clients.
	unregister chan uint // the ChatClient id

	geoipClient *geoip.Client
	geoipClient *geoip.Client

	// a map of user IDs and timers that fire for chat part messages.
	userPartedTimers map[string]*time.Ticker
	seq uint
	maxSocketConnectionLimit int64

	mu sync.RWMutex

	// a map of user IDs and timers that fire for chat part messages.
	userPartedTimers map[string]*time.Ticker
}

// NewChat will return a new instance of the chat server.
@@ -622,8 +622,8 @@ func VerifySettings() error {
// FindHighestVideoQualityIndex will return the highest quality from a slice of variants.
func FindHighestVideoQualityIndex(qualities []models.StreamOutputVariant) int {
	type IndexedQuality struct {
		index int
		quality models.StreamOutputVariant
		index int
	}

	if len(qualities) < 2 {
@@ -632,7 +632,7 @@ func FindHighestVideoQualityIndex(qualities []models.StreamOutputVariant) int {

	indexedQualities := make([]IndexedQuality, 0)
	for index, quality := range qualities {
		indexedQuality := IndexedQuality{index, quality}
		indexedQuality := IndexedQuality{quality, index}
		indexedQualities = append(indexedQualities, indexedQuality)
	}
@@ -8,8 +8,8 @@ import (
// ConfigEntry is the actual object saved to the database.
// The Value is encoded using encoding/gob.
type ConfigEntry struct {
	Key string
	Value interface{}
	Key string
}

func (c *ConfigEntry) getStringSlice() ([]string, error) {
@@ -20,6 +20,8 @@ func TestString(t *testing.T) {
	const testKey = "test string key"
	const testValue = "test string value"

	fmt.Println(testKey, testValue)

	if err := _datastore.SetString(testKey, testValue); err != nil {
		panic(err)
	}
@@ -87,7 +89,7 @@ func TestCustomType(t *testing.T) {
	}

	// Save config entry to the database
	if err := _datastore.Save(ConfigEntry{testKey, &testStruct}); err != nil {
	if err := _datastore.Save(ConfigEntry{&testStruct, testKey}); err != nil {
		t.Error(err)
	}
@@ -119,7 +121,7 @@ func TestStringMap(t *testing.T) {
	}

	// Save config entry to the database
	if err := _datastore.Save(ConfigEntry{testKey, &testMap}); err != nil {
	if err := _datastore.Save(ConfigEntry{&testMap, testKey}); err != nil {
		t.Error(err)
	}
@@ -17,9 +17,11 @@ import (
	log "github.com/sirupsen/logrus"
)

var emojiCacheMu sync.Mutex
var emojiCacheData = make([]models.CustomEmoji, 0)
var emojiCacheModTime time.Time
var (
	emojiCacheMu sync.Mutex
	emojiCacheData = make([]models.CustomEmoji, 0)
	emojiCacheModTime time.Time
)

// UpdateEmojiList will update the cache (if required) and
// return the modifiation time.
@@ -11,7 +11,7 @@ func (ds *Datastore) GetStringSlice(key string) ([]string, error) {

// SetStringSlice will set the string slice value for a key.
func (ds *Datastore) SetStringSlice(key string, value []string) error {
	configEntry := ConfigEntry{key, value}
	configEntry := ConfigEntry{value, key}
	return ds.Save(configEntry)
}

@@ -26,7 +26,7 @@ func (ds *Datastore) GetString(key string) (string, error) {

// SetString will set the string value for a key.
func (ds *Datastore) SetString(key string, value string) error {
	configEntry := ConfigEntry{key, value}
	configEntry := ConfigEntry{value, key}
	return ds.Save(configEntry)
}

@@ -41,7 +41,7 @@ func (ds *Datastore) GetNumber(key string) (float64, error) {

// SetNumber will set the numeric value for a key.
func (ds *Datastore) SetNumber(key string, value float64) error {
	configEntry := ConfigEntry{key, value}
	configEntry := ConfigEntry{value, key}
	return ds.Save(configEntry)
}

@@ -56,7 +56,7 @@ func (ds *Datastore) GetBool(key string) (bool, error) {

// SetBool will set the boolean value for a key.
func (ds *Datastore) SetBool(key string, value bool) error {
	configEntry := ConfigEntry{key, value}
	configEntry := ConfigEntry{value, key}
	return ds.Save(configEntry)
}

@@ -71,6 +71,6 @@ func (ds *Datastore) GetStringMap(key string) (map[string]string, error) {

// SetStringMap will set the string map value for a key.
func (ds *Datastore) SetStringMap(key string, value map[string]string) error {
	configEntry := ConfigEntry{key, value}
	configEntry := ConfigEntry{value, key}
	return ds.Save(configEntry)
}
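Because betteralign swapped ConfigEntry's field order, every positional literal ConfigEntry{key, value} in the hunks above had to be flipped to ConfigEntry{value, key}. A small sketch (an assumed helper, not repository code) of the field-keyed form that would be immune to such reordering:

// A minimal sketch: field-keyed literals don't depend on declaration order,
// so a betteralign-style reordering wouldn't require edits like the
// ConfigEntry{key, value} -> ConfigEntry{value, key} swaps above.
package example

type ConfigEntry struct {
	Value interface{}
	Key   string
}

func newEntry(key string, value interface{}) ConfigEntry {
	// Survives any future reordering of the struct's fields.
	return ConfigEntry{Key: key, Value: value}
}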
@@ -28,23 +28,25 @@ import (
type S3Storage struct {
	sess *session.Session
	s3Client *s3.S3
	host string

	s3Endpoint string
	s3ServingEndpoint string
	s3Region string
	s3Bucket string
	s3AccessKey string
	s3Secret string
	s3ACL string
	s3PathPrefix string
	s3ForcePathStyle bool
	uploader *s3manager.Uploader

	// If we try to upload a playlist but it is not yet on disk
	// then keep a reference to it here.
	queuedPlaylistUpdates map[string]string

	uploader *s3manager.Uploader
	s3Bucket string
	s3Region string
	s3ServingEndpoint string
	s3AccessKey string
	s3Secret string
	s3ACL string
	s3PathPrefix string

	s3Endpoint string
	host string

	s3ForcePathStyle bool
}

// NewS3Storage returns a new S3Storage instance.
@@ -330,6 +332,6 @@ func (s *S3Storage) retrieveAllVideoSegments() ([]s3object, error) {
}

type s3object struct {
	key string
	lastModified time.Time
	key string
}
@@ -13,8 +13,10 @@ import (
	log "github.com/sirupsen/logrus"
)

var _lastTranscoderLogMessage = ""
var l = &sync.RWMutex{}
var (
	_lastTranscoderLogMessage = ""
	l = &sync.RWMutex{}
)

var errorMap = map[string]string{
	"Unrecognized option 'vaapi_device'": "you are likely trying to utilize a vaapi codec, but your version of ffmpeg or your hardware doesn't support it. change your codec to libx264 and restart your stream",
@@ -100,14 +102,14 @@ func createVariantDirectories() {

	if len(data.GetStreamOutputVariants()) != 0 {
		for index := range data.GetStreamOutputVariants() {
			if err := os.MkdirAll(path.Join(config.HLSStoragePath, strconv.Itoa(index)), 0750); err != nil {
			if err := os.MkdirAll(path.Join(config.HLSStoragePath, strconv.Itoa(index)), 0o750); err != nil {
				log.Fatalln(err)
			}
		}
	} else {
		dir := path.Join(config.HLSStoragePath, strconv.Itoa(0))
		log.Traceln("Creating", dir)
		if err := os.MkdirAll(dir, 0750); err != nil {
		if err := os.MkdirAll(dir, 0o750); err != nil {
			log.Fatalln(err)
		}
	}
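The only change gofumpt makes to these os.MkdirAll and os.Mkdir calls is the spelling of the permission bits: 0o750 and 0750 are the same value, with the 0o prefix available since Go 1.13. A quick illustrative check, separate from the repository code:

// Not repository code: the two literal forms are identical values.
package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Println(0o750 == 0750)      // true
	fmt.Println(os.FileMode(0o750)) // -rwxr-x---
	fmt.Println(os.FileMode(0o700)) // -rwx------
}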
@@ -18,9 +18,9 @@ var webhookWorkerPoolSize = runtime.GOMAXPROCS(0)

// Job struct bundling the webhook and the payload in one struct.
type Job struct {
	webhook models.Webhook
	payload WebhookEvent
	wg *sync.WaitGroup
	payload WebhookEvent
	webhook models.Webhook
}

var (
@@ -46,7 +46,7 @@ func initWorkerPool() {

func addToQueue(webhook models.Webhook, payload WebhookEvent, wg *sync.WaitGroup) {
	log.Tracef("Queued Event %s for Webhook %s", payload.Type, webhook.URL)
	queue <- Job{webhook, payload, wg}
	queue <- Job{wg, payload, webhook}
}

func worker(workerID int, queue <-chan Job) {
@@ -76,7 +76,7 @@ func (c *Client) fetchGeoForIP(ip string) *GeoDetails {
	// If no country is available then exit
	// If we believe this IP to be anonymous then no reason to report it
	if record.Country.IsoCode != "" && !record.Traits.IsAnonymousProxy {
		var regionName = "Unknown"
		regionName := "Unknown"
		if len(record.Subdivisions) > 0 {
			if region, ok := record.Subdivisions[0].Names["en"]; ok {
				regionName = region
@@ -34,7 +34,7 @@ func Setup(enableDebugOptions bool, enableVerboseLogging bool) {
	// Create the logging directory if needed
	loggingDirectory := filepath.Dir(getLogFilePath())
	if !utils.DoesFileExists(loggingDirectory) {
		if err := os.Mkdir(loggingDirectory, 0700); err != nil {
		if err := os.Mkdir(loggingDirectory, 0o700); err != nil {
			logger.Errorln("unable to create logs directory", loggingDirectory, err)
		}
	}
@@ -10,8 +10,10 @@ import (
)

// How often we poll for updates.
const hardwareMetricsPollingInterval = 2 * time.Minute
const playbackMetricsPollingInterval = 2 * time.Minute
const (
	hardwareMetricsPollingInterval = 2 * time.Minute
	playbackMetricsPollingInterval = 2 * time.Minute
)

const (
	// How often we poll for updates.
@@ -2,14 +2,12 @@ package models

// S3 is the storage configuration.
type S3 struct {
	Enabled bool `json:"enabled"`
	Endpoint string `json:"endpoint,omitempty"`
	AccessKey string `json:"accessKey,omitempty"`
	Secret string `json:"secret,omitempty"`
	Bucket string `json:"bucket,omitempty"`
	Region string `json:"region,omitempty"`
	ACL string `json:"acl,omitempty"`
	ForcePathStyle bool `json:"forcePathStyle"`
	Endpoint string `json:"endpoint,omitempty"`
	AccessKey string `json:"accessKey,omitempty"`
	Secret string `json:"secret,omitempty"`
	Bucket string `json:"bucket,omitempty"`
	Region string `json:"region,omitempty"`
	ACL string `json:"acl,omitempty"`

	// PathPrefix is an optional prefix for object storage.
	PathPrefix string `json:"pathPrefix,omitempty"`
@@ -18,4 +16,6 @@ type S3 struct {
	// property that was pulled out of here instead. It's only left here
	// to allow the migration to take place without data loss.
	ServingEndpoint string `json:"-"`
	Enabled bool `json:"enabled"`
	ForcePathStyle bool `json:"forcePathStyle"`
}
@@ -20,7 +20,7 @@ func getPatternForRestEndpoint(pattern string) string {
}

func zip2D(iterable1 *[]string, iterable2 *[]string) map[string]string {
	var dict = make(map[string]string)
	dict := make(map[string]string)
	for index, key := range *iterable1 {
		dict[key] = (*iterable2)[index]
	}