diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000..9db384445d --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,57 @@ +version: "2" + +# Here is the full list of customizable features +# https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +linters: + enable: + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. + - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. + - govet # Vet examines Go source code and reports suspicious constructs + - ineffassign # Detects when assignments to existing variables are not used + - misspell # Finds commonly misspelled English words in comments. + - predeclared # Find code that shadows one of Go's predeclared identifiers. + - staticcheck # Set of rules from staticcheck. + - testifylint # Checks usage of github.com/stretchr/testify. + - unused # Checks Go code for unused constants, variables, functions and types + settings: + testifylint: + # Enable all checkers (https://github.com/Antonboom/testifylint#checkers). + enable-all: true + disable: + - require-error + - suite-thelper + staticcheck: + checks: ["all", "-ST1003", "-ST1005"] + + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ + - component/azstorage/config.go + - common/version.go + rules: + - path: component/libfuse/libfuse2_handler_test_wrapper.go + text: "(\\w) (\\w+|\\(\\*\\w+\\)\\.\\w+) is unused" + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + +formatters: + enable: + - gofmt + - goimports + - golines + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ \ No newline at end of file diff --git a/blobfuse2-1es_ci.yaml b/blobfuse2-1es_ci.yaml index e27170e361..3c457a9f4b 100644 --- a/blobfuse2-1es_ci.yaml +++ b/blobfuse2-1es_ci.yaml @@ -90,9 +90,9 @@ extends: - script: | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin $(go env GOPATH)/bin/golangci-lint --version - $(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags $(tags) --exclude-dirs test,common/stats_collector,common/stats_monitor --max-issues-per-linter=0 --exclude-files component/libfuse/libfuse2_handler_test_wrapper.go,component/libfuse/libfuse_handler_test_wrapper.go > lint.log + $(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags $(tags) > lint.log result=$(cat lint.log | wc -l) - if [ $result -ne 0 ]; then + if [ $result -ne 1 ]; then echo "-----------------------------------" echo "Below issues are found in SA" cat lint.log diff --git a/cmd/doc.go b/cmd/doc.go index 3841200e60..2d180c375d 100644 --- a/cmd/doc.go +++ b/cmd/doc.go @@ -69,7 +69,10 @@ var docCmd = &cobra.Command{ // it will include this command too, which is intended err = doc.GenMarkdownTree(rootCmd, docCmdInput.outputLocation) if err != nil { - return fmt.Errorf("cannot generate command tree [%s]. Please contact the dev team", err.Error()) + return fmt.Errorf( + "cannot generate command tree [%s]. 
Please contact the dev team", + err.Error(), + ) } return nil }, diff --git a/cmd/gen-config.go b/cmd/gen-config.go index a3310453ba..4f05cb238d 100644 --- a/cmd/gen-config.go +++ b/cmd/gen-config.go @@ -46,10 +46,10 @@ import ( type genConfigParams struct { blockCache bool `config:"block-cache" yaml:"block-cache,omitempty"` - directIO bool `config:"direct-io" yaml:"direct-io,omitempty"` - readOnly bool `config:"ro" yaml:"ro,omitempty"` - tmpPath string `config:"tmp-path" yaml:"tmp-path,omitempty"` - outputFile string `config:"o" yaml:"o,omitempty"` + directIO bool `config:"direct-io" yaml:"direct-io,omitempty"` + readOnly bool `config:"ro" yaml:"ro,omitempty"` + tmpPath string `config:"tmp-path" yaml:"tmp-path,omitempty"` + outputFile string `config:"o" yaml:"o,omitempty"` } var optsGenCfg genConfigParams @@ -66,7 +66,9 @@ var generatedConfig = &cobra.Command{ // Check if configTmp is not provided when component is fc if (!optsGenCfg.blockCache) && optsGenCfg.tmpPath == "" { - return fmt.Errorf("temp path is required for file cache mode. Use flag --tmp-path to provide the path") + return fmt.Errorf( + "temp path is required for file cache mode. Use flag --tmp-path to provide the path", + ) } // Set the configs @@ -103,8 +105,12 @@ var generatedConfig = &cobra.Command{ sb.WriteString("read-only: true\n\n") } - sb.WriteString("# Logger configuration\n#logging:\n # type: syslog|silent|base\n # level: log_off|log_crit|log_err|log_warning|log_info|log_trace|log_debug\n") - sb.WriteString(" # file-path: \n") + sb.WriteString( + "# Logger configuration\n#logging:\n # type: syslog|silent|base\n # level: log_off|log_crit|log_err|log_warning|log_info|log_trace|log_debug\n", + ) + sb.WriteString( + " # file-path: \n", + ) sb.WriteString("\ncomponents:\n") for _, component := range pipeline { @@ -120,8 +126,12 @@ var generatedConfig = &cobra.Command{ sb.WriteString(c.GenConfig()) } - sb.WriteString("\n#Required\n#azstorage:\n # type: block|adls \n # account-name: \n # container: \n # endpoint: \n ") - sb.WriteString("# mode: key|sas|spn|msi|azcli \n # account-key: \n # OR\n # sas: \n # OR\n # appid: \n # OR\n # tenantid: \n # container: \n # endpoint: \n ", + ) + sb.WriteString( + "# mode: key|sas|spn|msi|azcli \n # account-key: \n # OR\n # sas: \n # OR\n # appid: \n # OR\n # tenantid: 0 { - options.Components = append(options.Components[:1], append([]string{"entry_cache"}, options.Components[1:]...)...) + options.Components = append( + options.Components[:1], + append([]string{"entry_cache"}, options.Components[1:]...)...) 
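The entry_cache hunk above splices a new component into the pipeline right after the first element using Go's nested-append idiom. A standalone sketch of the same pattern, with illustrative component names rather than blobfuse2's actual pipeline values:

package main

import "fmt"

func main() {
	components := []string{"libfuse", "attr_cache", "azstorage"}

	// The inner append materializes {"entry_cache", "attr_cache", "azstorage"}
	// first; the outer append then re-attaches that slice after components[0].
	components = append(components[:1],
		append([]string{"entry_cache"}, components[1:]...)...)

	fmt.Println(components) // [libfuse entry_cache attr_cache azstorage]
}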
} if err = common.ValidatePipeline(options.Components); err != nil { @@ -439,13 +450,19 @@ var mountCmd = &cobra.Command{ } if config.IsSet("invalidate-on-sync") { - log.Warn("mount: unsupported v1 CLI parameter: invalidate-on-sync is always true in blobfuse2.") + log.Warn( + "mount: unsupported v1 CLI parameter: invalidate-on-sync is always true in blobfuse2.", + ) } if config.IsSet("pre-mount-validate") { - log.Warn("mount: unsupported v1 CLI parameter: pre-mount-validate is always true in blobfuse2.") + log.Warn( + "mount: unsupported v1 CLI parameter: pre-mount-validate is always true in blobfuse2.", + ) } if config.IsSet("basic-remount-check") { - log.Warn("mount: unsupported v1 CLI parameter: basic-remount-check is always true in blobfuse2.") + log.Warn( + "mount: unsupported v1 CLI parameter: basic-remount-check is always true in blobfuse2.", + ) } common.EnableMonitoring = options.MonitorOpt.EnableMon @@ -459,7 +476,11 @@ var mountCmd = &cobra.Command{ var pipeline *internal.Pipeline - log.Crit("Starting Blobfuse2 Mount : %s on [%s]", common.Blobfuse2Version, common.GetCurrentDistro()) + log.Crit( + "Starting Blobfuse2 Mount : %s on [%s]", + common.Blobfuse2Version, + common.GetCurrentDistro(), + ) log.Info("Mount Command: %s", os.Args) log.Crit("Logging level set to : %s", logLevel.String()) log.Debug("Mount allowed on nonempty path : %v", options.NonEmpty) @@ -469,7 +490,9 @@ var mountCmd = &cobra.Command{ for i, name := range options.Components { if name == "attr_cache" { options.Components = append(options.Components[:i], options.Components[i+1:]...) - log.Crit("Mount::runPipeline : Direct IO enabled, removing attr_cache from pipeline") + log.Crit( + "Mount::runPipeline : Direct IO enabled, removing attr_cache from pipeline", + ) break } } @@ -492,7 +515,7 @@ var mountCmd = &cobra.Command{ log.Info("mount: Mounting blobfuse2 on %s", options.MountPath) if !options.Foreground { - pidFile := strings.Replace(options.MountPath, "/", "_", -1) + ".pid" + pidFile := strings.ReplaceAll(options.MountPath, "/", "_") + ".pid" pidFileName := filepath.Join(os.ExpandEnv(common.DefaultWorkDir), pidFile) pid := os.Getpid()
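The pid-file hunk just above swaps strings.Replace(s, old, new, -1) for strings.ReplaceAll; per the strings package documentation the two are equivalent when the count is -1, so this is a pure readability cleanup. A minimal standalone sketch (the mount path below is illustrative only):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	mountPath := "/mnt/blobfuse/data" // hypothetical mount path

	// strings.ReplaceAll(s, old, new) == strings.Replace(s, old, new, -1):
	// every "/" becomes "_", giving a pid-file name unique to this mount path.
	pidFile := strings.ReplaceAll(mountPath, "/", "_") + ".pid"

	fmt.Println(filepath.Join("/tmp", pidFile)) // Output: /tmp/_mnt_blobfuse_data.pid
}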
@@ -525,7 +548,10 @@ var mountCmd = &cobra.Command{ // a cleanup of the .pid file. If cleanup goes through then retry the daemonization. child, err := dmnCtx.Reborn() if err != nil { - log.Err("mount : failed to daemonize application [%s], trying auto cleanup", err.Error()) + log.Err( + "mount : failed to daemonize application [%s], trying auto cleanup", + err.Error(), + ) rmErr := os.Remove(pidFileName) if rmErr != nil { log.Err("mount : auto cleanup failed [%v]", rmErr.Error()) @@ -652,7 +678,12 @@ func runPipeline(pipeline *internal.Pipeline, ctx context.Context) error { pid := fmt.Sprintf("%v", os.Getpid()) common.TransferPipe += "_" + pid common.PollingPipe += "_" + pid - log.Debug("Mount::runPipeline : blobfuse2 pid = %v, transfer pipe = %v, polling pipe = %v", pid, common.TransferPipe, common.PollingPipe) + log.Debug( + "Mount::runPipeline : blobfuse2 pid = %v, transfer pipe = %v, polling pipe = %v", + pid, + common.TransferPipe, + common.PollingPipe, + ) go startMonitor(os.Getpid()) @@ -678,7 +709,13 @@ func startMonitor(pid int) { buf := new(bytes.Buffer) rootCmd.SetOut(buf) rootCmd.SetErr(buf) - rootCmd.SetArgs([]string{"health-monitor", fmt.Sprintf("--pid=%v", pid), fmt.Sprintf("--config-file=%s", options.ConfigFile)}) + rootCmd.SetArgs( + []string{ + "health-monitor", + fmt.Sprintf("--pid=%v", pid), + fmt.Sprintf("--config-file=%s", options.ConfigFile), + }, + ) err := rootCmd.Execute() if err != nil { common.EnableMonitoring = false @@ -725,7 +762,12 @@ func cleanupCachePath(componentName string, globalCleanupFlag bool) error { // Clean up if either global or component-specific flag is set if globalCleanupFlag || componentCleanupFlag { if err := common.TempCacheCleanup(cachePath); err != nil { - return fmt.Errorf("failed to cleanup temp cache path: %s for %s component: %v", cachePath, componentName, err) + return fmt.Errorf( + "failed to cleanup temp cache path: %s for %s component: %v", + cachePath, + componentName, + err, + ) } } @@ -807,40 +849,63 @@ func init() { mountCmd.PersistentFlags().StringVar(&options.PassPhrase, "passphrase", "", "Key to decrypt config file. Can also be specified by env-variable BLOBFUSE2_SECURE_CONFIG_PASSPHRASE.\nKey length shall be 16 (AES-128), 24 (AES-192), or 32 (AES-256) bytes in length.") - mountCmd.PersistentFlags().String("log-type", "syslog", "Type of logger to be used by the system. Set to syslog by default. Allowed values are silent|syslog|base.") + mountCmd.PersistentFlags(). + String("log-type", "syslog", "Type of logger to be used by the system. Set to syslog by default. Allowed values are silent|syslog|base.") config.BindPFlag("logging.type", mountCmd.PersistentFlags().Lookup("log-type")) - _ = mountCmd.RegisterFlagCompletionFunc("log-type", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"silent", "base", "syslog"}, cobra.ShellCompDirectiveNoFileComp - }) + _ = mountCmd.RegisterFlagCompletionFunc( + "log-type", + func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"silent", "base", "syslog"}, cobra.ShellCompDirectiveNoFileComp + }, + ) // Add a generic cleanup-on-start flag that applies to all cache components - mountCmd.PersistentFlags().Bool("cleanup-on-start", false, "Clear cache directory on startup if not empty for file_cache, block_cache, xload components.") + mountCmd.PersistentFlags().
+ Bool("cleanup-on-start", false, "Clear cache directory on startup if not empty for file_cache, block_cache, xload components.") config.BindPFlag("cleanup-on-start", mountCmd.PersistentFlags().Lookup("cleanup-on-start")) mountCmd.PersistentFlags().String("log-level", "LOG_WARNING", "Enables logs written to syslog. Set to LOG_WARNING by default. Allowed values are LOG_OFF|LOG_CRIT|LOG_ERR|LOG_WARNING|LOG_INFO|LOG_DEBUG") config.BindPFlag("logging.level", mountCmd.PersistentFlags().Lookup("log-level")) - _ = mountCmd.RegisterFlagCompletionFunc("log-level", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"LOG_OFF", "LOG_CRIT", "LOG_ERR", "LOG_WARNING", "LOG_INFO", "LOG_TRACE", "LOG_DEBUG"}, cobra.ShellCompDirectiveNoFileComp - }) + _ = mountCmd.RegisterFlagCompletionFunc( + "log-level", + func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{ + "LOG_OFF", + "LOG_CRIT", + "LOG_ERR", + "LOG_WARNING", + "LOG_INFO", + "LOG_TRACE", + "LOG_DEBUG", + }, cobra.ShellCompDirectiveNoFileComp + }, + ) mountCmd.PersistentFlags().String("log-file-path", common.DefaultLogFilePath, "Configures the path for log files. Default is "+common.DefaultLogFilePath) config.BindPFlag("logging.file-path", mountCmd.PersistentFlags().Lookup("log-file-path")) _ = mountCmd.MarkPersistentFlagDirname("log-file-path") - mountCmd.PersistentFlags().Bool("foreground", false, "Mount the system in foreground mode. Default value false.") + mountCmd.PersistentFlags(). + Bool("foreground", false, "Mount the system in foreground mode. Default value false.") config.BindPFlag("foreground", mountCmd.PersistentFlags().Lookup("foreground")) - mountCmd.PersistentFlags().Bool("read-only", false, "Mount the system in read only mode. Default value false.") + mountCmd.PersistentFlags(). + Bool("read-only", false, "Mount the system in read only mode. Default value false.") config.BindPFlag("read-only", mountCmd.PersistentFlags().Lookup("read-only")) - mountCmd.PersistentFlags().Bool("lazy-write", false, "Async write to storage container after file handle is closed.") + mountCmd.PersistentFlags(). + Bool("lazy-write", false, "Async write to storage container after file handle is closed.") config.BindPFlag("lazy-write", mountCmd.PersistentFlags().Lookup("lazy-write")) - mountCmd.PersistentFlags().String("default-working-dir", "", "Default working directory for storing log files and other blobfuse2 information") + mountCmd.PersistentFlags(). + String("default-working-dir", "", "Default working directory for storing log files and other blobfuse2 information") mountCmd.PersistentFlags().Lookup("default-working-dir").Hidden = true - config.BindPFlag("default-working-dir", mountCmd.PersistentFlags().Lookup("default-working-dir")) + config.BindPFlag( + "default-working-dir", + mountCmd.PersistentFlags().Lookup("default-working-dir"), + ) _ = mountCmd.MarkPersistentFlagDirname("default-working-dir") mountCmd.Flags().BoolVar(&options.Streaming, "streaming", false, "Enable Streaming.") @@ -850,7 +915,8 @@ func init() { mountCmd.Flags().BoolVar(&options.BlockCache, "block-cache", false, "Enable Block-Cache.") config.BindPFlag("block-cache", mountCmd.Flags().Lookup("block-cache")) - mountCmd.Flags().BoolVar(&options.Preload, "preload", false, "Enable Preload, to start downloading all files from container on mount.") + mountCmd.Flags(). 
+ BoolVar(&options.Preload, "preload", false, "Enable Preload, to start downloading all files from container on mount.") config.BindPFlag("preload", mountCmd.Flags().Lookup("preload")) mountCmd.Flags().BoolVar(&options.AttrCache, "use-attr-cache", true, "Use attribute caching.") @@ -864,18 +930,25 @@ func init() { config.BindPFlag("pre-mount-validate", mountCmd.Flags().Lookup("pre-mount-validate")) mountCmd.Flags().Lookup("pre-mount-validate").Hidden = true - mountCmd.Flags().Bool("basic-remount-check", true, "Validate blobfuse2 is mounted by reading /etc/mtab.") + mountCmd.Flags(). + Bool("basic-remount-check", true, "Validate blobfuse2 is mounted by reading /etc/mtab.") config.BindPFlag("basic-remount-check", mountCmd.Flags().Lookup("basic-remount-check")) mountCmd.Flags().Lookup("basic-remount-check").Hidden = true - mountCmd.PersistentFlags().StringSliceVarP(&options.LibfuseOptions, "o", "o", []string{}, "FUSE options.") + mountCmd.PersistentFlags(). + StringSliceVarP(&options.LibfuseOptions, "o", "o", []string{}, "FUSE options.") config.BindPFlag("libfuse-options", mountCmd.PersistentFlags().ShorthandLookup("o")) mountCmd.PersistentFlags().ShorthandLookup("o").Hidden = true - mountCmd.PersistentFlags().DurationVar(&options.WaitForMount, "wait-for-mount", 5*time.Second, "Let parent process wait for given timeout before exit") + mountCmd.PersistentFlags(). + DurationVar(&options.WaitForMount, "wait-for-mount", 5*time.Second, "Let parent process wait for given timeout before exit") - mountCmd.PersistentFlags().Bool("disable-kernel-cache", false, "Disable kerneel cache, but keep blobfuse cache. Default value false.") - config.BindPFlag("disable-kernel-cache", mountCmd.PersistentFlags().Lookup("disable-kernel-cache")) + mountCmd.PersistentFlags(). + Bool("disable-kernel-cache", false, "Disable kernel cache, but keep blobfuse cache. Default value false.") + config.BindPFlag( + "disable-kernel-cache", + mountCmd.PersistentFlags().Lookup("disable-kernel-cache"), + ) config.AttachToFlagSet(mountCmd.PersistentFlags()) config.AttachFlagCompletions(mountCmd)
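Every registration in this init() follows the same two-step pattern: declare the flag on the cobra command, then bind it to a config key so the value can come from the CLI, the config file, or the environment. A minimal self-contained sketch of that pattern using spf13/viper directly; blobfuse2's config.BindPFlag is the project's own wrapper around the same idea, so the direct viper call here is an assumption for illustration:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			// Resolved from CLI flag, config file, or env - highest priority wins.
			fmt.Println("disable-kernel-cache =", viper.GetBool("disable-kernel-cache"))
		},
	}

	// Step 1: register the pflag (golines wraps the call after the receiver).
	cmd.PersistentFlags().
		Bool("disable-kernel-cache", false, "Disable kernel cache, but keep blobfuse cache.")

	// Step 2: bind the flag to the config key.
	_ = viper.BindPFlag("disable-kernel-cache", cmd.PersistentFlags().Lookup("disable-kernel-cache"))

	_ = cmd.Execute()
}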
Default value false.") + config.BindPFlag( + "disable-kernel-cache", + mountCmd.PersistentFlags().Lookup("disable-kernel-cache"), + ) config.AttachToFlagSet(mountCmd.PersistentFlags()) config.AttachFlagCompletions(mountCmd) diff --git a/cmd/mount_all.go b/cmd/mount_all.go index eae1888403..54b727676f 100644 --- a/cmd/mount_all.go +++ b/cmd/mount_all.go @@ -176,7 +176,12 @@ func processCommand() error { if len(containerList) > 0 { containerList = filterAllowedContainerList(containerList) - err = mountAllContainers(containerList, options.ConfigFile, options.MountPath, configFileExists) + err = mountAllContainers( + containerList, + options.ConfigFile, + options.MountPath, + configFileExists, + ) if err != nil { return err } @@ -220,10 +225,7 @@ func getContainerList() ([]string, error) { // FiterAllowedContainer : Filter which containers are allowed to be mounted func filterAllowedContainerList(containers []string) []string { - allowListing := false - if len(mountAllOpts.AllowList) > 0 { - allowListing = true - } + allowListing := len(mountAllOpts.AllowList) > 0 // Convert the entire container list into a map var filterContainer = make(map[string]bool) @@ -262,7 +264,12 @@ func filterAllowedContainerList(containers []string) []string { } // mountAllContainers : Iterate allowed container list and create config file and mount path for them -func mountAllContainers(containerList []string, configFile string, mountPath string, configFileExists bool) error { +func mountAllContainers( + containerList []string, + configFile string, + mountPath string, + configFileExists bool, +) error { // Now iterate filtered container list and prepare mount path, temp path, and config file for them fileCachePath := "" _ = config.UnmarshalKey("file_cache.path", &fileCachePath) @@ -335,7 +342,11 @@ func mountAllContainers(containerList []string, configFile string, mountPath str } } - fmt.Printf("%d of %d containers were successfully mounted\n", (len(containerList) - failCount), len(containerList)) + fmt.Printf( + "%d of %d containers were successfully mounted\n", + (len(containerList) - failCount), + len(containerList), + ) return nil } diff --git a/cmd/mountgen1.go b/cmd/mountgen1.go index b5a0c45857..10032a2d5f 100644 --- a/cmd/mountgen1.go +++ b/cmd/mountgen1.go @@ -106,7 +106,8 @@ var gen1Cmd = &cobra.Command{ } // not checking ClientSecret since adlsgen1fuse will be reading secret from env variable (ADL_CLIENT_SECRET) - if azStorageOpt.ClientID == "" || azStorageOpt.TenantID == "" || azStorageOpt.AccountName == "" { + if azStorageOpt.ClientID == "" || azStorageOpt.TenantID == "" || + azStorageOpt.AccountName == "" { log.Err("mountgen1 : clientId, tenantId or accountName can't be empty") return fmt.Errorf("clientId, tenantId or accountName can't be empty") } @@ -181,7 +182,10 @@ func generateAdlsGenOneJson() error { var allowOther bool err := config.UnmarshalKey("allow-other", &allowOther) if err != nil { - log.Err("mountgen1 : generateAdlsGenOneJson:allow-other config error (invalid config attributes) [%s]", err.Error()) + log.Err( + "mountgen1 : generateAdlsGenOneJson:allow-other config error (invalid config attributes) [%s]", + err.Error(), + ) return fmt.Errorf("unable to parse allow-other config [%s]", err.Error()) } @@ -219,7 +223,10 @@ func generateAdlsGenOneJson() error { err = os.WriteFile(gen1ConfigFilePath, jsonData, 0777) if err != nil { - log.Err("mountgen1 : generateAdlsGenOneJson:failed to write adlsgen1fuse.json [%s]", err.Error()) + log.Err( + "mountgen1 : generateAdlsGenOneJson:failed to write 
adlsgen1fuse.json [%s]", + err.Error(), + ) return fmt.Errorf("failed to write adlsgen1fuse.json [%s]", err.Error()) } @@ -234,7 +241,11 @@ func runAdlsGenOneBinary() error { _, err := adlsgen1fuseCmd.Output() if err != nil { - log.Err("mountgen1 : runAdlsGenOneBinary: unable to run adlsgen1fuse binary (%s : %s)", err.Error(), errb.String()) + log.Err( + "mountgen1 : runAdlsGenOneBinary: unable to run adlsgen1fuse binary (%s : %s)", + err.Error(), + errb.String(), + ) return fmt.Errorf("unable to run adlsgen1fuse binary (%s : %s)", err.Error(), errb.String()) } @@ -247,9 +258,12 @@ func init() { gen1Cmd.Flags().StringVar(&configFile, "config-file", "config.yaml", "Configures the path for the file where the account credentials are provided. Default is config.yaml") - gen1Cmd.Flags().IntVar(&requiredFreeSpace, "required-free-space-mb", 0, "Required free space in MB") + gen1Cmd.Flags(). + IntVar(&requiredFreeSpace, "required-free-space-mb", 0, "Required free space in MB") - gen1Cmd.Flags().BoolVar(&generateJsonOnly, "generate-json-only", false, "Don't mount, only generate the JSON file needed for gen1 mount") + gen1Cmd.Flags(). + BoolVar(&generateJsonOnly, "generate-json-only", false, "Don't mount, only generate the JSON file needed for gen1 mount") - gen1Cmd.Flags().StringVar(&gen1ConfigFilePath, "output-file", "/tmp/adlsgen1fuse.json", "Output JSON file needed for gen1 mount") + gen1Cmd.Flags(). + StringVar(&gen1ConfigFilePath, "output-file", "/tmp/adlsgen1fuse.json", "Output JSON file needed for gen1 mount") } diff --git a/cmd/mountv1.go b/cmd/mountv1.go index 658cb99eec..10f390b6c4 100755 --- a/cmd/mountv1.go +++ b/cmd/mountv1.go @@ -94,13 +94,13 @@ type PipelineConfig struct { ReadOnlyOption bool `yaml:"read-only,omitempty"` AllowOtherOption bool `yaml:"allow-other,omitempty"` NonEmptyMountOption bool `yaml:"nonempty,omitempty"` - LogOptions `yaml:"logging,omitempty"` - libfuse.LibfuseOptions `yaml:"libfuse,omitempty"` - block_cache.StreamOptions `yaml:"stream,omitempty"` - file_cache.FileCacheOptions `yaml:"file_cache,omitempty"` - attr_cache.AttrCacheOptions `yaml:"attr_cache,omitempty"` - azstorage.AzStorageOptions `yaml:"azstorage,omitempty"` - ComponentsConfig `yaml:"components,omitempty"` + LogOptions ` yaml:"logging,omitempty"` + libfuse.LibfuseOptions ` yaml:"libfuse,omitempty"` + block_cache.StreamOptions ` yaml:"stream,omitempty"` + file_cache.FileCacheOptions ` yaml:"file_cache,omitempty"` + attr_cache.AttrCacheOptions ` yaml:"attr_cache,omitempty"` + azstorage.AzStorageOptions ` yaml:"azstorage,omitempty"` + ComponentsConfig ` yaml:"components,omitempty"` } var outputFilePath string @@ -175,13 +175,19 @@ var generateConfigCmd = &cobra.Command{ continue } if len(configParam) != 2 { - return fmt.Errorf("failed to read configuration file. Configuration %s is incorrect. Make sure your configuration file parameters are of the format `key value`", configParam) + return fmt.Errorf( + "failed to read configuration file. Configuration %s is incorrect. 
Make sure your configuration file parameters are of the format `key value`", + configParam, + ) } // get corresponding Blobfuse2 configurations from the config file parameters err := convertBfConfigParameter(cmd.Flags(), configParam[0], configParam[1]) if err != nil { - return fmt.Errorf("failed to convert configuration parameters [%s]", err.Error()) + return fmt.Errorf( + "failed to convert configuration parameters [%s]", + err.Error(), + ) } } @@ -229,14 +235,20 @@ var generateConfigCmd = &cobra.Command{ } accountType := "" - if bfv2StorageConfigOptions.AccountType == "" || bfv2StorageConfigOptions.AccountType == "blob" { + switch bfv2StorageConfigOptions.AccountType { + case "", "blob": accountType = "blob" - } else if bfv2StorageConfigOptions.AccountType == "adls" { + case "adls": accountType = "dfs" - } else { + default: return fmt.Errorf("invalid account type") } - bfv2StorageConfigOptions.Endpoint = fmt.Sprintf("%s://%s.%s.core.windows.net", http, accountName, accountType) + bfv2StorageConfigOptions.Endpoint = fmt.Sprintf( + "%s://%s.%s.core.windows.net", + http, + accountName, + accountType, + ) } bfv2StorageConfigOptions.VirtualDirectory = true @@ -264,7 +276,14 @@ var generateConfigCmd = &cobra.Command{ rootCmd.SetOut(buf) rootCmd.SetErr(buf) if enableGen1 { - rootCmd.SetArgs([]string{"mountgen1", mountPath, fmt.Sprintf("--config-file=%s", outputFilePath), fmt.Sprintf("--required-free-space-mb=%v", reqFreeSpaceMB)}) + rootCmd.SetArgs( + []string{ + "mountgen1", + mountPath, + fmt.Sprintf("--config-file=%s", outputFilePath), + fmt.Sprintf("--required-free-space-mb=%v", reqFreeSpaceMB), + }, + ) } else { rootCmd.SetArgs([]string{"mount", mountPath, fmt.Sprintf("--config-file=%s", outputFilePath), "--disable-version-check=true"}) } @@ -337,7 +356,11 @@ func parseFuseConfig(config []string) error { } // helper method: converts config file options -func convertBfConfigParameter(flags *pflag.FlagSet, configParameterKey string, configParameterValue string) error { +func convertBfConfigParameter( + flags *pflag.FlagSet, + configParameterKey string, + configParameterValue string, +) error { switch configParameterKey { case "logLevel": if !flags.Lookup("log-level").Changed { @@ -387,7 +410,10 @@ func convertBfConfigParameter(flags *pflag.FlagSet, configParameterKey string, c return nil default: - return fmt.Errorf("failed to parse configuration file. Configuration parameter `%s` is not supported in Blobfuse2", configParameterKey) + return fmt.Errorf( + "failed to parse configuration file. 
Configuration parameter `%s` is not supported in Blobfuse2", + configParameterKey, + ) } return nil @@ -395,10 +421,17 @@ func convertBfConfigParameter(flags *pflag.FlagSet, configParameterKey string, c // helper method: converts cli options - cli options that overlap with config file take precedence func convertBfCliParameters(flags *pflag.FlagSet) error { - if flags.Lookup("set-content-type").Changed || flags.Lookup("ca-cert-file").Changed || flags.Lookup("basic-remount-check").Changed || flags.Lookup( - "background-download").Changed || flags.Lookup("cache-poll-timeout-msec").Changed || flags.Lookup("upload-modified-only").Changed || flags.Lookup("debug-libcurl").Changed { + if flags.Lookup("set-content-type").Changed || flags.Lookup("ca-cert-file").Changed || + flags.Lookup("basic-remount-check").Changed || + flags.Lookup( + "background-download").Changed || + flags.Lookup("cache-poll-timeout-msec").Changed || + flags.Lookup("upload-modified-only").Changed || + flags.Lookup("debug-libcurl").Changed { logWriter, _ := syslog.New(syslog.LOG_WARNING, "") - _ = logWriter.Warning("one or more unsupported v1 parameters [set-content-type, ca-cert-file, basic-remount-check, background-download, cache-poll-timeout-msec, upload-modified-only, debug-libcurl] have been passed, ignoring and proceeding to mount") + _ = logWriter.Warning( + "one or more unsupported v1 parameters [set-content-type, ca-cert-file, basic-remount-check, background-download, cache-poll-timeout-msec, upload-modified-only, debug-libcurl] have been passed, ignoring and proceeding to mount", + ) } bfv2LoggingConfigOptions.Type = "syslog" @@ -414,7 +447,9 @@ func convertBfCliParameters(flags *pflag.FlagSet) error { bfv2StreamConfigOptions.BlockSize = bfConfCliOptions.blockSize } if flags.Lookup("max-blocks-per-file").Changed { - bfv2StreamConfigOptions.BufferSize = bfConfCliOptions.blockSize * uint64(bfConfCliOptions.maxBlocksPerFile) + bfv2StreamConfigOptions.BufferSize = bfConfCliOptions.blockSize * uint64( + bfConfCliOptions.maxBlocksPerFile, + ) } if flags.Lookup("stream-cache-mb").Changed { bfv2StreamConfigOptions.CachedObjLimit = bfConfCliOptions.streamCacheSize / bfv2StreamConfigOptions.BufferSize @@ -510,54 +545,93 @@ func convertBfCliParameters(flags *pflag.FlagSet) error { func init() { rootCmd.AddCommand(generateConfigCmd) - generateConfigCmd.Flags().StringVar(&outputFilePath, "output-file", "config.yaml", "Output Blobfuse configuration file.") - - generateConfigCmd.Flags().StringVar(&bfConfCliOptions.tmpPath, "tmp-path", "", "Tmp location for the file cache.") - generateConfigCmd.Flags().StringVar(&bfConfCliOptions.configFile, "config-file", "", "Input Blobfuse configuration file.") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.useHttps, "use-https", false, "Enables HTTPS communication with Blob storage.") - generateConfigCmd.Flags().Uint32Var(&bfConfCliOptions.fileCacheTimeout, "file-cache-timeout-in-seconds", 0, "During this time, blobfuse will not check whether the file is up to date or not.") - generateConfigCmd.Flags().StringVar(&bfConfCliOptions.containerName, "container-name", "", "Required if no configuration file is specified.") - generateConfigCmd.Flags().StringVar(&bfConfCliOptions.logLevel, "log-level", "LOG_WARNING", "Logging level.") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.useAttrCache, "use-attr-cache", false, "Enable attribute cache.") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.useAdls, "use-adls", false, "Enables blobfuse to access Azure DataLake storage account.") - 
generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.noSymlinks, "no-symlinks", false, "Disables symlink support.") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.cacheOnList, "cache-on-list", true, "Cache attributes on listing.") - generateConfigCmd.Flags().Uint16Var(&bfConfCliOptions.maxConcurrency, "max-concurrency", 0, "Option to override default number of concurrent storage connections") - generateConfigCmd.Flags().Float64Var(&bfConfCliOptions.cacheSize, "cache-size-mb", 0, "File cache size.") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.emptyDirCheck, "empty-dir-check", false, "Disallows remounting using a non-empty tmp-path.") - generateConfigCmd.Flags().Uint16Var(&bfConfCliOptions.cancelListOnMount, "cancel-list-on-mount-seconds", 0, "A list call to the container is by default issued on mount.") - generateConfigCmd.Flags().Uint32Var(&bfConfCliOptions.highDiskThreshold, "high-disk-threshold", 0, "High disk threshold percentage.") - generateConfigCmd.Flags().Uint32Var(&bfConfCliOptions.lowDiskThreshold, "low-disk-threshold", 0, "Low disk threshold percentage.") - generateConfigCmd.Flags().Uint32Var(&bfConfCliOptions.maxEviciton, "max-eviction", 0, "Number of files to be evicted from cache at once.") - generateConfigCmd.Flags().StringVar(&bfConfCliOptions.httpsProxy, "https-proxy", "", "HTTPS Proxy address.") - generateConfigCmd.Flags().StringVar(&bfConfCliOptions.httpProxy, "http-proxy", "", "HTTP Proxy address.") - generateConfigCmd.Flags().Int32Var(&bfConfCliOptions.maxRetry, "max-retry", 0, "Maximum retry count if the failure codes are retryable.") - generateConfigCmd.Flags().Int32Var(&bfConfCliOptions.maxRetryInterval, "max-retry-interval-in-seconds", 0, "Maximum number of seconds between 2 retries.") - generateConfigCmd.Flags().Int32Var(&bfConfCliOptions.retryDelayFactor, "retry-delay-factor", 0, "Retry delay between two tries") + generateConfigCmd.Flags(). + StringVar(&outputFilePath, "output-file", "config.yaml", "Output Blobfuse configuration file.") + + generateConfigCmd.Flags(). + StringVar(&bfConfCliOptions.tmpPath, "tmp-path", "", "Tmp location for the file cache.") + generateConfigCmd.Flags(). + StringVar(&bfConfCliOptions.configFile, "config-file", "", "Input Blobfuse configuration file.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.useHttps, "use-https", false, "Enables HTTPS communication with Blob storage.") + generateConfigCmd.Flags(). + Uint32Var(&bfConfCliOptions.fileCacheTimeout, "file-cache-timeout-in-seconds", 0, "During this time, blobfuse will not check whether the file is up to date or not.") + generateConfigCmd.Flags(). + StringVar(&bfConfCliOptions.containerName, "container-name", "", "Required if no configuration file is specified.") + generateConfigCmd.Flags(). + StringVar(&bfConfCliOptions.logLevel, "log-level", "LOG_WARNING", "Logging level.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.useAttrCache, "use-attr-cache", false, "Enable attribute cache.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.useAdls, "use-adls", false, "Enables blobfuse to access Azure DataLake storage account.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.noSymlinks, "no-symlinks", false, "Disables symlink support.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.cacheOnList, "cache-on-list", true, "Cache attributes on listing.") + generateConfigCmd.Flags(). 
+ Uint16Var(&bfConfCliOptions.maxConcurrency, "max-concurrency", 0, "Option to override default number of concurrent storage connections") + generateConfigCmd.Flags(). + Float64Var(&bfConfCliOptions.cacheSize, "cache-size-mb", 0, "File cache size.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.emptyDirCheck, "empty-dir-check", false, "Disallows remounting using a non-empty tmp-path.") + generateConfigCmd.Flags(). + Uint16Var(&bfConfCliOptions.cancelListOnMount, "cancel-list-on-mount-seconds", 0, "A list call to the container is by default issued on mount.") + generateConfigCmd.Flags(). + Uint32Var(&bfConfCliOptions.highDiskThreshold, "high-disk-threshold", 0, "High disk threshold percentage.") + generateConfigCmd.Flags(). + Uint32Var(&bfConfCliOptions.lowDiskThreshold, "low-disk-threshold", 0, "Low disk threshold percentage.") + generateConfigCmd.Flags(). + Uint32Var(&bfConfCliOptions.maxEviciton, "max-eviction", 0, "Number of files to be evicted from cache at once.") + generateConfigCmd.Flags(). + StringVar(&bfConfCliOptions.httpsProxy, "https-proxy", "", "HTTPS Proxy address.") + generateConfigCmd.Flags(). + StringVar(&bfConfCliOptions.httpProxy, "http-proxy", "", "HTTP Proxy address.") + generateConfigCmd.Flags(). + Int32Var(&bfConfCliOptions.maxRetry, "max-retry", 0, "Maximum retry count if the failure codes are retryable.") + generateConfigCmd.Flags(). + Int32Var(&bfConfCliOptions.maxRetryInterval, "max-retry-interval-in-seconds", 0, "Maximum number of seconds between 2 retries.") + generateConfigCmd.Flags(). + Int32Var(&bfConfCliOptions.retryDelayFactor, "retry-delay-factor", 0, "Retry delay between two tries") //invalidate-on-sync is always on - accept it as an arg and just ignore it generateConfigCmd.Flags().Bool("invalidate-on-sync", true, "Invalidate file/dir on sync/fsync") //pre-mount-validate is always on - accept it as an arg and just ignore it generateConfigCmd.Flags().Bool("pre-mount-validate", true, "Validate blobfuse2 is mounted") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.useStreaming, "streaming", false, "Enable Streaming.") - generateConfigCmd.Flags().Uint64Var(&bfConfCliOptions.streamCacheSize, "stream-cache-mb", 0, "Limit total amount of data being cached in memory to conserve memory footprint of blobfuse.") - generateConfigCmd.Flags().IntVar(&bfConfCliOptions.maxBlocksPerFile, "max-blocks-per-file", 0, "Maximum number of blocks to be cached in memory for streaming.") - generateConfigCmd.Flags().Uint64Var(&bfConfCliOptions.blockSize, "block-size-mb", 0, "Size (in MB) of a block to be downloaded during streaming.") - - generateConfigCmd.Flags().StringSliceVarP(&libfuseOptions, "o", "o", []string{}, "FUSE options.") - generateConfigCmd.Flags().BoolVarP(&bfConfCliOptions.fuseLogging, "d", "d", false, "Mount with foreground and FUSE logs on.") - generateConfigCmd.Flags().BoolVar(&convertConfigOnly, "convert-config-only", false, "Don't mount - only convert v1 configuration to v2.") - generateConfigCmd.Flags().BoolVar(&bfConfCliOptions.ignoreOpenFlags, "ignore-open-flags", false, "Flag to ignore open flags unsupported by blobfuse.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.useStreaming, "streaming", false, "Enable Streaming.") + generateConfigCmd.Flags(). + Uint64Var(&bfConfCliOptions.streamCacheSize, "stream-cache-mb", 0, "Limit total amount of data being cached in memory to conserve memory footprint of blobfuse.") + generateConfigCmd.Flags(). 
+ IntVar(&bfConfCliOptions.maxBlocksPerFile, "max-blocks-per-file", 0, "Maximum number of blocks to be cached in memory for streaming.") + generateConfigCmd.Flags(). + Uint64Var(&bfConfCliOptions.blockSize, "block-size-mb", 0, "Size (in MB) of a block to be downloaded during streaming.") + + generateConfigCmd.Flags(). + StringSliceVarP(&libfuseOptions, "o", "o", []string{}, "FUSE options.") + generateConfigCmd.Flags(). + BoolVarP(&bfConfCliOptions.fuseLogging, "d", "d", false, "Mount with foreground and FUSE logs on.") + generateConfigCmd.Flags(). + BoolVar(&convertConfigOnly, "convert-config-only", false, "Don't mount - only convert v1 configuration to v2.") + generateConfigCmd.Flags(). + BoolVar(&bfConfCliOptions.ignoreOpenFlags, "ignore-open-flags", false, "Flag to ignore open flags unsupported by blobfuse.") // options that are not available in V2: - generateConfigCmd.Flags().Bool("set-content-type", false, "Turns on automatic 'content-type' property based on the file extension.") - generateConfigCmd.Flags().String("ca-cert-file", "", "Specifies the proxy pem certificate path if its not in the default path.") - generateConfigCmd.Flags().Bool("basic-remount-check", false, "Check for an already mounted status using /etc/mtab.") - generateConfigCmd.Flags().Bool("background-download", false, "File download to run in the background on open call.") - generateConfigCmd.Flags().Uint64("cache-poll-timeout-msec", 0, "Time in milliseconds in order to poll for possible expired files awaiting cache eviction.") - generateConfigCmd.Flags().Bool("upload-modified-only", false, "Flag to turn off unnecessary uploads to storage.") - generateConfigCmd.Flags().Bool("debug-libcurl", false, "Flag to allow users to debug libcurl calls.") + generateConfigCmd.Flags(). + Bool("set-content-type", false, "Turns on automatic 'content-type' property based on the file extension.") + generateConfigCmd.Flags(). + String("ca-cert-file", "", "Specifies the proxy pem certificate path if its not in the default path.") + generateConfigCmd.Flags(). + Bool("basic-remount-check", false, "Check for an already mounted status using /etc/mtab.") + generateConfigCmd.Flags(). + Bool("background-download", false, "File download to run in the background on open call.") + generateConfigCmd.Flags(). + Uint64("cache-poll-timeout-msec", 0, "Time in milliseconds in order to poll for possible expired files awaiting cache eviction.") + generateConfigCmd.Flags(). + Bool("upload-modified-only", false, "Flag to turn off unnecessary uploads to storage.") + generateConfigCmd.Flags(). + Bool("debug-libcurl", false, "Flag to allow users to debug libcurl calls.") // flags for gen1 mount generateConfigCmd.Flags().BoolVar(&enableGen1, "enable-gen1", false, "To enable Gen1 mount") - generateConfigCmd.Flags().IntVar(&reqFreeSpaceMB, "required-free-space-mb", 0, "Required free space in MB") + generateConfigCmd.Flags(). 
+ IntVar(&reqFreeSpaceMB, "required-free-space-mb", 0, "Required free space in MB") } diff --git a/cmd/root.go b/cmd/root.go index 8d8ad296a8..5c42924114 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -75,7 +75,9 @@ var rootCmd = &cobra.Command{ return err } } - return errors.New("missing command options\n\nDid you mean this?\n\tblobfuse2 mount\n\nRun 'blobfuse2 --help' for usage") + return errors.New( + "missing command options\n\nDid you mean this?\n\tblobfuse2 mount\n\nRun 'blobfuse2 --help' for usage", + ) }, } @@ -167,9 +169,22 @@ func beginDetectNewVersion() chan any { if isBlocked { // This version is blocked and customer shall not be allowed to use this. - blockedPage := common.BlobFuse2BlockingURL + "#" + strings.ReplaceAll(strings.ReplaceAll(common.Blobfuse2Version, ".", ""), "~", "") - fmt.Fprintf(stderr, "PANIC: Visit %s to see the list of known issues blocking your current version [%s]\n", blockedPage, common.Blobfuse2Version) - log.Warn("PANIC: Visit %s to see the list of known issues blocking your current version [%s]\n", blockedPage, common.Blobfuse2Version) + blockedPage := common.BlobFuse2BlockingURL + "#" + strings.ReplaceAll( + strings.ReplaceAll(common.Blobfuse2Version, ".", ""), + "~", + "", + ) + fmt.Fprintf( + stderr, + "PANIC: Visit %s to see the list of known issues blocking your current version [%s]\n", + blockedPage, + common.Blobfuse2Version, + ) + log.Warn( + "PANIC: Visit %s to see the list of known issues blocking your current version [%s]\n", + blockedPage, + common.Blobfuse2Version, + ) os.Exit(1) } else { // This version is not blocked but has know issues list which customer shall visit. @@ -180,11 +195,22 @@ func beginDetectNewVersion() chan any { } if local.OlderThan(*remote) { - executablePathSegments := strings.Split(strings.Replace(os.Args[0], "\\", "/", -1), "/") + executablePathSegments := strings.Split(strings.ReplaceAll(os.Args[0], "\\", "/"), "/") executableName := executablePathSegments[len(executablePathSegments)-1] - log.Info("beginDetectNewVersion: A new version of Blobfuse2 is available. Current Version=%s, Latest Version=%s", common.Blobfuse2Version, remoteVersion) - fmt.Fprintf(stderr, "*** "+executableName+": A new version [%s] is available. Consider upgrading to latest version for bug-fixes & new features. ***\n", remoteVersion) - log.Info("*** "+executableName+": A new version [%s] is available. Consider upgrading to latest version for bug-fixes & new features. ***\n", remoteVersion) + log.Info( + "beginDetectNewVersion: A new version of Blobfuse2 is available. Current Version=%s, Latest Version=%s", + common.Blobfuse2Version, + remoteVersion, + ) + fmt.Fprintf( + stderr, + "*** "+executableName+": A new version [%s] is available. Consider upgrading to latest version for bug-fixes & new features. ***\n", + remoteVersion, + ) + log.Info( + "*** "+executableName+": A new version [%s] is available. Consider upgrading to latest version for bug-fixes & new features. ***\n", + remoteVersion, + ) completed <- "A new version of Blobfuse2 is available" } @@ -198,7 +224,9 @@ func VersionCheck() error { //either wait till this routine completes or timeout if it exceeds 8 secs case <-beginDetectNewVersion(): case <-time.After(8 * time.Second): - return fmt.Errorf("unable to obtain latest version information. please check your internet connection") + return fmt.Errorf( + "unable to obtain latest version information. 
please check your internet connection", + ) } return nil } @@ -288,5 +316,6 @@ func Execute() error { } func init() { - rootCmd.PersistentFlags().BoolVar(&disableVersionCheck, "disable-version-check", false, "To disable version check that is performed automatically") + rootCmd.PersistentFlags(). + BoolVar(&disableVersionCheck, "disable-version-check", false, "To disable version check that is performed automatically") } diff --git a/cmd/secure.go b/cmd/secure.go index a1272951de..a39c7e6b47 100644 --- a/cmd/secure.go +++ b/cmd/secure.go @@ -136,7 +136,9 @@ func validateOptions() error { } if secOpts.PassPhrase == "" { - return errors.New("provide the passphrase as a cli parameter or configure the BLOBFUSE2_SECURE_CONFIG_PASSPHRASE environment variable") + return errors.New( + "provide the passphrase as a cli parameter or configure the BLOBFUSE2_SECURE_CONFIG_PASSPHRASE environment variable", + ) } return nil @@ -157,7 +159,10 @@ func encryptConfigFile(saveConfig bool) ([]byte, error) { if saveConfig { outputFileName := "" if secOpts.OutputFile == "" { - outputFileName = filepath.Join(common.ExpandPath(common.DefaultWorkDir), filepath.Base(secOpts.ConfigFile)) + outputFileName = filepath.Join( + common.ExpandPath(common.DefaultWorkDir), + filepath.Base(secOpts.ConfigFile), + ) outputFileName += SecureConfigExtension } else { outputFileName = secOpts.OutputFile @@ -184,7 +189,10 @@ func decryptConfigFile(saveConfig bool) ([]byte, error) { if saveConfig { outputFileName := "" if secOpts.OutputFile == "" { - outputFileName = filepath.Join(os.ExpandEnv(common.DefaultWorkDir), filepath.Base(secOpts.ConfigFile)) + outputFileName = filepath.Join( + os.ExpandEnv(common.DefaultWorkDir), + filepath.Base(secOpts.ConfigFile), + ) extension := filepath.Ext(outputFileName) outputFileName = outputFileName[0 : len(outputFileName)-len(extension)] } else { diff --git a/cmd/version.go b/cmd/version.go index 256f24bfca..16174fbdd1 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -59,5 +59,6 @@ var versionCmd = &cobra.Command{ func init() { rootCmd.AddCommand(versionCmd) - versionCmd.Flags().BoolVar(&check, "check", false, "To check whether latest version exists or not") + versionCmd.Flags(). 
+ BoolVar(&check, "check", false, "To check whether latest version exists or not") } diff --git a/common/cache_policy/lru_policy.go b/common/cache_policy/lru_policy.go index f85f787d17..3ccf321ab1 100644 --- a/common/cache_policy/lru_policy.go +++ b/common/cache_policy/lru_policy.go @@ -112,7 +112,11 @@ func (cache *LRUCache) Put(key int64, value *common.Block) bool { func (cache *LRUCache) Print() { for _, value := range cache.Elements { - log.Debug("Key:%+v,Value:%+v\n", getKeyPair(value).value.StartIndex, getKeyPair(value).value.EndIndex) + log.Debug( + "Key:%+v,Value:%+v\n", + getKeyPair(value).value.StartIndex, + getKeyPair(value).value.EndIndex, + ) } } diff --git a/common/config/config_parser.go b/common/config/config_parser.go index a0dd16cd2a..86d409cc5d 100644 --- a/common/config/config_parser.go +++ b/common/config/config_parser.go @@ -227,7 +227,11 @@ func BindPFlag(key string, flag *pflag.Flag) { // // the key parameter should take on the value "auth.key" func UnmarshalKey(key string, obj any) error { - err := viper.UnmarshalKey(key, obj, func(decodeConfig *mapstructure.DecoderConfig) { decodeConfig.TagName = STRUCT_TAG }) + err := viper.UnmarshalKey( + key, + obj, + func(decodeConfig *mapstructure.DecoderConfig) { decodeConfig.TagName = STRUCT_TAG }, + ) if err != nil { return fmt.Errorf("config error: unmarshalling [%v]", err) } @@ -254,7 +258,10 @@ func UnmarshalKey(key string, obj any) error { // Unmarshal populates the passed object and all the exported fields. // use lower case attribute names to ignore a particular field func Unmarshal(obj any) error { - err := viper.Unmarshal(obj, func(decodeConfig *mapstructure.DecoderConfig) { decodeConfig.TagName = STRUCT_TAG }) + err := viper.Unmarshal( + obj, + func(decodeConfig *mapstructure.DecoderConfig) { decodeConfig.TagName = STRUCT_TAG }, + ) if err != nil { return fmt.Errorf("config error: unmarshalling [%v]", err) } @@ -390,7 +397,10 @@ func AddDurationFlag(name string, value time.Duration, usage string) *pflag.Flag return userOptions.flags.Lookup(name) } -func RegisterFlagCompletionFunc(flagName string, completionFunc func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective)) { +func RegisterFlagCompletionFunc( + flagName string, + completionFunc func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective), +) { userOptions.completionFuncMap[flagName] = completionFunc } @@ -410,5 +420,7 @@ func init() { userOptions.flagTree = NewTree() userOptions.envTree = NewTree() - userOptions.completionFuncMap = make(map[string]func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective)) + userOptions.completionFuncMap = make( + map[string]func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective), + ) } diff --git a/common/exectime/exectime.go b/common/exectime/exectime.go index d11df7b62f..b3f2f95dfe 100644 --- a/common/exectime/exectime.go +++ b/common/exectime/exectime.go @@ -77,7 +77,14 @@ func (t *Timer) PrintStats() { } for key, stat := range t.statsMap { total := stat.Mean() * time.Duration(stat.N) - msg := fmt.Sprintf("%s: avg=%s, std=%s, total=%s, ops/sec=%f\n", key, stat.Mean(), stat.StandardDeviation(), total, (1.0 / float64(stat.Mean().Seconds()))) + msg := fmt.Sprintf( + "%s: avg=%s, std=%s, total=%s, ops/sec=%f\n", + key, + stat.Mean(), + stat.StandardDeviation(), + total, + (1.0 / float64(stat.Mean().Seconds())), + ) _, err = t.out.Write([]byte(msg)) if err != nil { 
fmt.Printf("Timer::PrintStats: error writing [%s]\n", err) diff --git a/common/log/base_logger.go b/common/log/base_logger.go index 91dbf49602..45ff995a19 100644 --- a/common/log/base_logger.go +++ b/common/log/base_logger.go @@ -276,7 +276,11 @@ func (l *BaseLogger) LogRotate() error { _ = os.Rename(l.fileConfig.LogFile, l.fileConfig.LogFile+".1") var err error - l.logFileHandle, err = os.OpenFile(l.fileConfig.LogFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) + l.logFileHandle, err = os.OpenFile( + l.fileConfig.LogFile, + os.O_CREATE|os.O_WRONLY|os.O_APPEND, + 0644, + ) if err != nil { l.logFileHandle = os.Stdout } diff --git a/common/log/logger.go b/common/log/logger.go index 17150b5563..093540a497 100644 --- a/common/log/logger.go +++ b/common/log/logger.go @@ -72,7 +72,8 @@ func NewLogger(name string, config common.LogConfig) (Logger, error) { config.Tag = common.FileSystemName } - if name == "base" { + switch name { + case "base": baseLogger, err := newBaseLogger(LogFileConfig{ LogFile: config.FilePath, LogLevel: config.Level, @@ -84,13 +85,13 @@ func NewLogger(name string, config common.LogConfig) (Logger, error) { return nil, err } return baseLogger, nil - } else if name == "silent" { + case "silent": silentLogger := &SilentLogger{} return silentLogger, nil - } else if name == "" || name == "default" || name == "syslog" { + case "", "default", "syslog": sysLogger, err := newSysLogger(config.Level, config.Tag) if err != nil { - if err == NoSyslogService { + if err == ErrNoSyslogService { // Syslog service does not exists on this system // fallback to file based logging. return NewLogger("base", config) diff --git a/common/log/sys_logger.go b/common/log/sys_logger.go index ead544d1f8..551740a0df 100644 --- a/common/log/sys_logger.go +++ b/common/log/sys_logger.go @@ -50,7 +50,7 @@ type SysLogger struct { logger *log.Logger } -var NoSyslogService = errors.New("failed to create syslog object") +var ErrNoSyslogService = errors.New("failed to create syslog object") func newSysLogger(lvl common.LogLevel, tag string) (*SysLogger, error) { l := &SysLogger{ @@ -87,7 +87,7 @@ func (l *SysLogger) init() error { logwriter, e := syslog.New(getSyslogLevel(l.level), l.tag) if e != nil { - return NoSyslogService + return ErrNoSyslogService } l.logger = log.New(logwriter, "", 0) diff --git a/common/types.go b/common/types.go index 2749993840..8689d2f2a4 100644 --- a/common/types.go +++ b/common/types.go @@ -76,7 +76,21 @@ const ( ) func FuseIgnoredFlags() []string { - return []string{"default_permissions", "rw", "dev", "nodev", "suid", "nosuid", "delay_connect", "auto", "noauto", "user", "nouser", "exec", "noexec"} + return []string{ + "default_permissions", + "rw", + "dev", + "nodev", + "suid", + "nosuid", + "delay_connect", + "auto", + "noauto", + "user", + "nouser", + "exec", + "noexec", + } } var Blobfuse2Version = Blobfuse2Version_() @@ -213,7 +227,8 @@ func (bol BlockOffsetList) BinarySearch(offset int64) (bool, int) { for lowerBound <= higherBound { middleIndex := (lowerBound + higherBound) / 2 // we found the starting block that changes are being applied to - if bol.BlockList[middleIndex].EndIndex > offset && bol.BlockList[middleIndex].StartIndex <= offset { + if bol.BlockList[middleIndex].EndIndex > offset && + bol.BlockList[middleIndex].StartIndex <= offset { return true, middleIndex // if the end index is smaller or equal then we need to increase our lower bound } else if bol.BlockList[middleIndex].EndIndex <= offset { @@ -240,7 +255,8 @@ func (bol BlockOffsetList) FindBlocks(offset, 
diff --git a/common/types.go b/common/types.go index 2749993840..8689d2f2a4 100644 --- a/common/types.go +++ b/common/types.go @@ -76,7 +76,21 @@ const ( ) func FuseIgnoredFlags() []string { - return []string{"default_permissions", "rw", "dev", "nodev", "suid", "nosuid", "delay_connect", "auto", "noauto", "user", "nouser", "exec", "noexec"} + return []string{ + "default_permissions", + "rw", + "dev", + "nodev", + "suid", + "nosuid", + "delay_connect", + "auto", + "noauto", + "user", + "nouser", + "exec", + "noexec", + } } var Blobfuse2Version = Blobfuse2Version_() @@ -213,7 +227,8 @@ func (bol BlockOffsetList) BinarySearch(offset int64) (bool, int) { for lowerBound <= higherBound { middleIndex := (lowerBound + higherBound) / 2 // we found the starting block that changes are being applied to - if bol.BlockList[middleIndex].EndIndex > offset && bol.BlockList[middleIndex].StartIndex <= offset { + if bol.BlockList[middleIndex].EndIndex > offset && + bol.BlockList[middleIndex].StartIndex <= offset { return true, middleIndex // if the end index is smaller or equal then we need to increase our lower bound } else if bol.BlockList[middleIndex].EndIndex <= offset { @@ -240,7 +255,8 @@ func (bol BlockOffsetList) FindBlocks(offset, length int64) ([]*Block, bool) { if blk.StartIndex > offset+length { break } - if currentBlockOffset >= blk.StartIndex && currentBlockOffset < blk.EndIndex && currentBlockOffset <= offset+length { + if currentBlockOffset >= blk.StartIndex && currentBlockOffset < blk.EndIndex && + currentBlockOffset <= offset+length { blocks = append(blocks, blk) currentBlockOffset = blk.EndIndex } @@ -263,7 +279,8 @@ func (bol BlockOffsetList) FindBlocksToModify(offset, length int64) (int, int64, if blk.StartIndex > offset+length { break } - if currentBlockOffset >= blk.StartIndex && currentBlockOffset < blk.EndIndex && currentBlockOffset <= offset+length { + if currentBlockOffset >= blk.StartIndex && currentBlockOffset < blk.EndIndex && + currentBlockOffset <= offset+length { appendOnly = false blk.Flags.Set(DirtyBlock) currentBlockOffset = blk.EndIndex @@ -310,8 +327,8 @@ func NewUUID() (u uuid) { } // returns block id of given length -func GetBlockID(len int64) string { - return base64.StdEncoding.EncodeToString(NewUUIDWithLength(len)) +func GetBlockID(length int64) string { + return base64.StdEncoding.EncodeToString(NewUUIDWithLength(length)) } func GetIdLength(id string) int64 { diff --git a/common/util.go b/common/util.go index dc613420e6..98e47db3a1 100644 --- a/common/util.go +++ b/common/util.go @@ -105,7 +105,7 @@ func IsMountActive(path string) (bool, error) { } // out contains the list of pids of the processes that are running - pidString := strings.Replace(out.String(), "\n", " ", -1) + pidString := strings.ReplaceAll(out.String(), "\n", " ") pids := strings.Split(pidString, " ") myPid := strconv.Itoa(os.Getpid()) for _, pid := range pids { @@ -122,7 +122,11 @@ func IsMountActive(path string) (bool, error) { err := cmd.Run() if err != nil { - return true, fmt.Errorf("failed to get command line arguments for pid %s [%v]", pid, err.Error()) + return true, fmt.Errorf( + "failed to get command line arguments for pid %s [%v]", + pid, + err.Error(), + ) } if strings.Contains(out.String(), path) { @@ -364,7 +368,14 @@ func NotifyMountToParent() error { return nil } -var duPath []string = []string{"/usr/bin/du", "/usr/local/bin/du", "/usr/sbin/du", "/usr/local/sbin/du", "/sbin/du", "/bin/du"} +var duPath []string = []string{ + "/usr/bin/du", + "/usr/local/bin/du", + "/usr/sbin/du", + "/usr/local/sbin/du", + "/sbin/du", + "/bin/du", +} var selectedDuPath string = "" // GetUsage: The current disk usage in MB @@ -512,10 +523,10 @@ func WriteToFile(filename string, data string, options WriteToFileOptions) error return nil } -func GetCRC64(data []byte, len int) []byte { +func GetCRC64(data []byte, length int) []byte { // Create a CRC64 hash using the ECMA polynomial crc64Table := crc64.MakeTable(crc64.ECMA) - checksum := crc64.Checksum(data[:len], crc64Table) + checksum := crc64.Checksum(data[:length], crc64Table) checksumBytes := make([]byte, 8) binary.BigEndian.PutUint64(checksumBytes, checksum) diff --git a/common/util_test.go b/common/util_test.go index 5a57b3b0f3..b6c3e0fbd4 100644 --- a/common/util_test.go +++ b/common/util_test.go @@ -125,8 +125,7 @@ func (suite *utilTestSuite) TestIsMountActiveTwoMounts() { err = cmd.Run() suite.assert.Nil(err) - res, err := IsMountActive(mntdir) - suite.assert.Nil(err) + res, _ := IsMountActive(mntdir) suite.assert.True(res) res, err = IsMountActive("/mnt/blobfuse") diff --git a/component/attr_cache/attr_cache.go b/component/attr_cache/attr_cache.go index e9636c6ea5..b65efec82d 100644 --- a/component/attr_cache/attr_cache.go +++
b/component/attr_cache/attr_cache.go @@ -66,9 +66,9 @@ type AttrCache struct { // Structure defining your config parameters type AttrCacheOptions struct { - Timeout uint32 `config:"timeout-sec" yaml:"timeout-sec,omitempty"` + Timeout uint32 `config:"timeout-sec" yaml:"timeout-sec,omitempty"` NoCacheOnList bool `config:"no-cache-on-list" yaml:"no-cache-on-list,omitempty"` - NoSymlinks bool `config:"no-symlinks" yaml:"no-symlinks,omitempty"` + NoSymlinks bool `config:"no-symlinks" yaml:"no-symlinks,omitempty"` //maximum file attributes overall to be cached MaxFiles int `config:"max-files" yaml:"max-files,omitempty"` @@ -346,7 +346,9 @@ func (ac *AttrCache) DeleteDir(options internal.DeleteDirOptions) error { } // ReadDir : Optionally cache attributes of paths returned by next component -func (ac *AttrCache) ReadDir(options internal.ReadDirOptions) (pathList []*internal.ObjAttr, err error) { +func (ac *AttrCache) ReadDir( + options internal.ReadDirOptions, +) (pathList []*internal.ObjAttr, err error) { log.Trace("AttrCache::ReadDir : %s", options.Name) pathList, err = ac.NextComponent().ReadDir(options) @@ -358,7 +360,9 @@ func (ac *AttrCache) ReadDir(options internal.ReadDirOptions) (pathList []*inter } // StreamDir : Optionally cache attributes of paths returned by next component -func (ac *AttrCache) StreamDir(options internal.StreamDirOptions) ([]*internal.ObjAttr, string, error) { +func (ac *AttrCache) StreamDir( + options internal.StreamDirOptions, +) ([]*internal.ObjAttr, string, error) { log.Trace("AttrCache::StreamDir : %s", options.Name) pathList, token, err := ac.NextComponent().StreamDir(options) @@ -379,12 +383,19 @@ func (ac *AttrCache) cacheAttributes(pathList []*internal.ObjAttr) { for _, attr := range pathList { if len(ac.cacheMap) > ac.maxFiles { - log.Debug("AttrCache::cacheAttributes : %s skipping adding path to attribute cache because it is full", pathList) + log.Debug( + "AttrCache::cacheAttributes : %s skipping adding path to attribute cache because it is full", + pathList, + ) break } ac.cacheLock.Lock() - ac.cacheMap[internal.TruncateDirName(attr.Path)] = newAttrCacheItem(attr, true, currTime) + ac.cacheMap[internal.TruncateDirName(attr.Path)] = newAttrCacheItem( + attr, + true, + currTime, + ) ac.cacheLock.Unlock() } @@ -461,10 +472,12 @@ func (ac *AttrCache) RenameFile(options internal.RenameFileOptions) error { func (ac *AttrCache) WriteFile(options *internal.WriteFileOptions) (int, error) { // GetAttr on cache hit will serve from cache, on cache miss will serve from next component. 
- attr, err := ac.GetAttr(internal.GetAttrOptions{Name: options.Handle.Path, RetrieveMetadata: true}) + attr, err := ac.GetAttr( + internal.GetAttrOptions{Name: options.Handle.Path, RetrieveMetadata: true}, + ) if err != nil { // Ignore not exists errors - this can happen if createEmptyFile is set to false - if !(os.IsNotExist(err) || err == syscall.ENOENT) { + if !os.IsNotExist(err) && err != syscall.ENOENT { return 0, err } } @@ -513,7 +526,7 @@ func (ac *AttrCache) CopyFromFile(options internal.CopyFromFileOptions) error { attr, err := ac.GetAttr(internal.GetAttrOptions{Name: options.Name, RetrieveMetadata: true}) if err != nil { // Ignore not exists errors - this can happen if createEmptyFile is set to false - if !(os.IsNotExist(err) || err == syscall.ENOENT) { + if !os.IsNotExist(err) && err != syscall.ENOENT { return err } } @@ -583,14 +596,15 @@ func (ac *AttrCache) GetAttr(options internal.GetAttrOptions) (*internal.ObjAttr ac.cacheLock.Lock() defer ac.cacheLock.Unlock() - if err == nil { + switch err { + case nil: // Retrieved attributes so cache them if len(ac.cacheMap) < ac.maxFiles { ac.cacheMap[truncatedPath] = newAttrCacheItem(pathAttr, true, time.Now()) } else { log.Debug("AttrCache::GetAttr : %s skipping adding to attribute cache because it is full", options.Name) } - } else if err == syscall.ENOENT { + case syscall.ENOENT: // Path does not exist so cache a no-entry item ac.cacheMap[truncatedPath] = newAttrCacheItem(&internal.ObjAttr{}, false, time.Now()) } @@ -608,7 +622,9 @@ func (ac *AttrCache) CreateLink(options internal.CreateLinkOptions) error { ac.cacheLock.RLock() defer ac.cacheLock.RUnlock() ac.invalidatePath(options.Name) - ac.invalidatePath(options.Target) // TODO : Why do we invalidate the target? Shouldn't the target remain unchanged? + ac.invalidatePath( + options.Target, + ) // TODO : Why do we invalidate the target? Shouldn't the target remain unchanged? 
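The GetAttr hunk above replaces an `if err == nil { … } else if err == syscall.ENOENT { … }` chain with `switch err { case nil: … case syscall.ENOENT: … }`; the same conversion shows up later in block_blob.go (DeleteFile, getAttrUsingRest, getAttrUsingList, ReadBuffer, ReadInBuffer, WriteFromFile). A Go expression switch compares the tag with each `case` using `==`, so the refactor is behaviour-preserving for sentinel errors. A short standalone sketch of the pattern (the `lookup` helper is hypothetical, not part of this codebase):

```go
package main

import (
	"fmt"
	"syscall"
)

// lookup stands in for a call such as GetAttr that reports "not found"
// through a sentinel errno; it exists only for illustration.
func lookup(name string) error {
	if name == "missing" {
		return syscall.ENOENT
	}
	return nil
}

func main() {
	for _, name := range []string{"present", "missing"} {
		// Same meaning as: if err == nil {...} else if err == syscall.ENOENT {...} else {...}
		switch err := lookup(name); err {
		case nil:
			fmt.Println(name, "-> cache the retrieved attributes")
		case syscall.ENOENT:
			fmt.Println(name, "-> cache a negative (no-entry) item")
		default:
			fmt.Println(name, "-> propagate:", err)
		}
	}
}
```

Like the original `==` comparisons, `case` matching does not unwrap errors, so the switch stays equivalent even though `errors.Is(err, syscall.ENOENT)` would also accept wrapped values.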
} return err @@ -684,10 +700,18 @@ func NewAttrCacheComponent() internal.Component { func init() { internal.AddComponent(compName, NewAttrCacheComponent) - attrCacheTimeout := config.AddUint32Flag("attr-cache-timeout", defaultAttrCacheTimeout, "attribute cache timeout") + attrCacheTimeout := config.AddUint32Flag( + "attr-cache-timeout", + defaultAttrCacheTimeout, + "attribute cache timeout", + ) config.BindPFlag(compName+".timeout-sec", attrCacheTimeout) - noSymlinks := config.AddBoolFlag("no-symlinks", false, "whether or not symlinks should be supported") + noSymlinks := config.AddBoolFlag( + "no-symlinks", + false, + "whether or not symlinks should be supported", + ) config.BindPFlag(compName+".no-symlinks", noSymlinks) cacheOnList := config.AddBoolFlag("cache-on-list", true, "Cache attributes on listing.") diff --git a/component/azstorage/azauth.go b/component/azstorage/azauth.go index 483068aa7e..7150f3aef9 100644 --- a/component/azstorage/azauth.go +++ b/component/azstorage/azauth.go @@ -220,12 +220,18 @@ func (base *azOAuthBase) getAzIdentityClientOptions(config *azAuthConfig) azcore } if config.ActiveDirectoryEndpoint != "" { - log.Debug("azAuthBase::getAzIdentityClientOptions : ActiveDirectoryAuthorityHost = %s", config.ActiveDirectoryEndpoint) + log.Debug( + "azAuthBase::getAzIdentityClientOptions : ActiveDirectoryAuthorityHost = %s", + config.ActiveDirectoryEndpoint, + ) opts.Cloud.ActiveDirectoryAuthorityHost = config.ActiveDirectoryEndpoint } if config.AuthResource != "" { if val, ok := opts.Cloud.Services[cloud.ResourceManager]; ok { - log.Debug("azAuthBase::getAzIdentityClientOptions : AuthResource = %s", config.AuthResource) + log.Debug( + "azAuthBase::getAzIdentityClientOptions : AuthResource = %s", + config.AuthResource, + ) val.Endpoint = config.AuthResource opts.Cloud.Services[cloud.ResourceManager] = val } diff --git a/component/azstorage/azauthWorkloadIdentity.go b/component/azstorage/azauthWorkloadIdentity.go index 8090e3a387..c4eb31ed51 100644 --- a/component/azstorage/azauthWorkloadIdentity.go +++ b/component/azstorage/azauthWorkloadIdentity.go @@ -63,7 +63,10 @@ func (azWorkloadIdentity *azAuthWorkloadIdentity) getTokenCredential() (azcore.T msiOpts.ID = azidentity.ClientID(azWorkloadIdentity.config.ApplicationID) cred, err := azidentity.NewManagedIdentityCredential(msiOpts) if err != nil { - log.Err("azAuthWorkloadIdentity::getTokenCredential : Failed to create managed identity credential [%s]", err.Error()) + log.Err( + "azAuthWorkloadIdentity::getTokenCredential : Failed to create managed identity credential [%s]", + err.Error(), + ) return nil, err } @@ -78,7 +81,10 @@ func (azWorkloadIdentity *azAuthWorkloadIdentity) getTokenCredential() (azcore.T }) if err != nil { - log.Err("azAuthWorkloadIdentity::getTokenCredential : Failed to get token from managed identity credential [%s]", err.Error()) + log.Err( + "azAuthWorkloadIdentity::getTokenCredential : Failed to get token from managed identity credential [%s]", + err.Error(), + ) return "", err } @@ -114,22 +120,33 @@ type azAuthBlobWorkloadIdentity struct { } // getServiceClient : returns SPN based service client for blob -func (azWorkloadIdentity *azAuthBlobWorkloadIdentity) getServiceClient(stConfig *AzStorageConfig) (any, error) { +func (azWorkloadIdentity *azAuthBlobWorkloadIdentity) getServiceClient( + stConfig *AzStorageConfig, +) (any, error) { cred, err := azWorkloadIdentity.getTokenCredential() if err != nil { - log.Err("azAuthBlobWorkloadIdentity::getServiceClient : Failed to get token credential from 
client assertion [%s]", err.Error()) + log.Err( + "azAuthBlobWorkloadIdentity::getServiceClient : Failed to get token credential from client assertion [%s]", + err.Error(), + ) return nil, err } opts, err := getAzBlobServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthBlobWorkloadIdentity::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthBlobWorkloadIdentity::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := service.NewClient(azWorkloadIdentity.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthBlobWorkloadIdentity::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthBlobWorkloadIdentity::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err @@ -140,22 +157,33 @@ type azAuthDatalakeWorkloadIdentity struct { } // getServiceClient : returns SPN based service client for blob -func (azWorkloadIdentity *azAuthDatalakeWorkloadIdentity) getServiceClient(stConfig *AzStorageConfig) (any, error) { +func (azWorkloadIdentity *azAuthDatalakeWorkloadIdentity) getServiceClient( + stConfig *AzStorageConfig, +) (any, error) { cred, err := azWorkloadIdentity.getTokenCredential() if err != nil { - log.Err("azAuthDatalakeWorkloadIdentity::getServiceClient : Failed to get token credential from client assertion [%s]", err.Error()) + log.Err( + "azAuthDatalakeWorkloadIdentity::getServiceClient : Failed to get token credential from client assertion [%s]", + err.Error(), + ) return nil, err } opts, err := getAzDatalakeServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthDatalakeWorkloadIdentity::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthDatalakeWorkloadIdentity::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := serviceBfs.NewClient(azWorkloadIdentity.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthDatalakeWorkloadIdentity::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthDatalakeWorkloadIdentity::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err diff --git a/component/azstorage/azauthcli.go b/component/azstorage/azauthcli.go index ee4fd38aca..54fde51ff0 100644 --- a/component/azstorage/azauthcli.go +++ b/component/azstorage/azauthcli.go @@ -62,19 +62,28 @@ type azAuthBlobCLI struct { func (azcli *azAuthBlobCLI) getServiceClient(stConfig *AzStorageConfig) (any, error) { cred, err := azcli.getTokenCredential() if err != nil { - log.Err("azAuthBlobCLI::getServiceClient : Failed to get token credential from azcli [%s]", err.Error()) + log.Err( + "azAuthBlobCLI::getServiceClient : Failed to get token credential from azcli [%s]", + err.Error(), + ) return nil, err } opts, err := getAzBlobServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthBlobCLI::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthBlobCLI::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := service.NewClient(azcli.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthBlobCLI::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthBlobCLI::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err @@ -88,19 +97,28 @@ type 
azAuthDatalakeCLI struct { func (azcli *azAuthDatalakeCLI) getServiceClient(stConfig *AzStorageConfig) (any, error) { cred, err := azcli.getTokenCredential() if err != nil { - log.Err("azAuthDatalakeCLI::getServiceClient : Failed to get token credential from azcli [%s]", err.Error()) + log.Err( + "azAuthDatalakeCLI::getServiceClient : Failed to get token credential from azcli [%s]", + err.Error(), + ) return nil, err } opts, err := getAzDatalakeServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthDatalakeCLI::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthDatalakeCLI::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := serviceBfs.NewClient(azcli.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthDatalakeCLI::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthDatalakeCLI::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err diff --git a/component/azstorage/azauthkey.go b/component/azstorage/azauthkey.go index 000820dee4..c2c3c89087 100644 --- a/component/azstorage/azauthkey.go +++ b/component/azstorage/azauthkey.go @@ -58,25 +58,36 @@ type azAuthBlobKey struct { // getServiceClient : returns shared key based service client for blob func (azkey *azAuthBlobKey) getServiceClient(stConfig *AzStorageConfig) (any, error) { if azkey.config.AccountKey == "" { - log.Err("azAuthBlobKey::getServiceClient : Shared key for account is empty, cannot authenticate user") + log.Err( + "azAuthBlobKey::getServiceClient : Shared key for account is empty, cannot authenticate user", + ) return nil, errors.New("shared key for account is empty, cannot authenticate user") } cred, err := azblob.NewSharedKeyCredential(azkey.config.AccountName, azkey.config.AccountKey) if err != nil { - log.Err("azAuthBlobKey::getServiceClient : Failed to create shared key credential [%s]", err.Error()) + log.Err( + "azAuthBlobKey::getServiceClient : Failed to create shared key credential [%s]", + err.Error(), + ) return nil, err } opts, err := getAzBlobServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthBlobKey::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthBlobKey::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := service.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthBlobKey::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthBlobKey::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err @@ -89,25 +100,39 @@ type azAuthDatalakeKey struct { // getServiceClient : returns shared key based service client for datalake func (azkey *azAuthDatalakeKey) getServiceClient(stConfig *AzStorageConfig) (any, error) { if azkey.config.AccountKey == "" { - log.Err("azAuthDatalakeKey::getServiceClient : Shared key for account is empty, cannot authenticate user") + log.Err( + "azAuthDatalakeKey::getServiceClient : Shared key for account is empty, cannot authenticate user", + ) return nil, errors.New("shared key for account is empty, cannot authenticate user") } - cred, err := azdatalake.NewSharedKeyCredential(azkey.config.AccountName, azkey.config.AccountKey) + cred, err := azdatalake.NewSharedKeyCredential( + azkey.config.AccountName, + azkey.config.AccountKey, + ) if err != nil { - 
log.Err("azAuthDatalakeKey::getServiceClient : Failed to create shared key credential [%s]", err.Error()) + log.Err( + "azAuthDatalakeKey::getServiceClient : Failed to create shared key credential [%s]", + err.Error(), + ) return nil, err } opts, err := getAzDatalakeServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthDatalakeKey::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthDatalakeKey::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := serviceBfs.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthDatalakeKey::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthDatalakeKey::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err diff --git a/component/azstorage/azauthmsi.go b/component/azstorage/azauthmsi.go index c9d969e871..1c4cdc1e82 100644 --- a/component/azstorage/azauthmsi.go +++ b/component/azstorage/azauthmsi.go @@ -112,19 +112,28 @@ type azAuthBlobMSI struct { func (azmsi *azAuthBlobMSI) getServiceClient(stConfig *AzStorageConfig) (any, error) { cred, err := azmsi.getTokenCredential() if err != nil { - log.Err("azAuthBlobMSI::getServiceClient : Failed to get token credential from MSI [%s]", err.Error()) + log.Err( + "azAuthBlobMSI::getServiceClient : Failed to get token credential from MSI [%s]", + err.Error(), + ) return nil, err } opts, err := getAzBlobServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthBlobMSI::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthBlobMSI::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := service.NewClient(azmsi.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthBlobMSI::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthBlobMSI::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err @@ -138,19 +147,28 @@ type azAuthDatalakeMSI struct { func (azmsi *azAuthDatalakeMSI) getServiceClient(stConfig *AzStorageConfig) (any, error) { cred, err := azmsi.getTokenCredential() if err != nil { - log.Err("azAuthDatalakeMSI::getServiceClient : Failed to get token credential from MSI [%s]", err.Error()) + log.Err( + "azAuthDatalakeMSI::getServiceClient : Failed to get token credential from MSI [%s]", + err.Error(), + ) return nil, err } opts, err := getAzDatalakeServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthDatalakeMSI::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthDatalakeMSI::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := serviceBfs.NewClient(azmsi.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthDatalakeMSI::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthDatalakeMSI::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err diff --git a/component/azstorage/azauthsas.go b/component/azstorage/azauthsas.go index 758ad4b26d..8a97930b82 100644 --- a/component/azstorage/azauthsas.go +++ b/component/azstorage/azauthsas.go @@ -69,19 +69,27 @@ type azAuthBlobSAS struct { // getServiceClient : returns SAS based service client for blob func (azsas *azAuthBlobSAS) getServiceClient(stConfig 
*AzStorageConfig) (any, error) { if azsas.config.SASKey == "" { - log.Err("azAuthBlobSAS::getServiceClient : SAS key for account is empty, cannot authenticate user") + log.Err( + "azAuthBlobSAS::getServiceClient : SAS key for account is empty, cannot authenticate user", + ) return nil, errors.New("sas key for account is empty, cannot authenticate user") } opts, err := getAzBlobServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthBlobSAS::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthBlobSAS::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := service.NewClientWithNoCredential(azsas.getEndpoint(), opts) if err != nil { - log.Err("azAuthBlobSAS::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthBlobSAS::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err @@ -94,19 +102,27 @@ type azAuthDatalakeSAS struct { // getServiceClient : returns SAS based service client for datalake func (azsas *azAuthDatalakeSAS) getServiceClient(stConfig *AzStorageConfig) (any, error) { if azsas.config.SASKey == "" { - log.Err("azAuthDatalakeSAS::getServiceClient : SAS key for account is empty, cannot authenticate user") + log.Err( + "azAuthDatalakeSAS::getServiceClient : SAS key for account is empty, cannot authenticate user", + ) return nil, errors.New("sas key for account is empty, cannot authenticate user") } opts, err := getAzDatalakeServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthDatalakeSAS::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthDatalakeSAS::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := serviceBfs.NewClientWithNoCredential(azsas.getEndpoint(), opts) if err != nil { - log.Err("azAuthDatalakeSAS::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthDatalakeSAS::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err diff --git a/component/azstorage/azauthspn.go b/component/azstorage/azauthspn.go index 95a87c41d4..a7f4a92a6b 100644 --- a/component/azstorage/azauthspn.go +++ b/component/azstorage/azauthspn.go @@ -61,14 +61,19 @@ func (azspn *azAuthSPN) getTokenCredential() (azcore.TokenCredential, error) { log.Trace("AzAuthSPN::getTokenCredential : Going for fedrated token flow") // TODO:: track2 : test this in Azure Kubernetes setup - cred, err = azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{ - ClientOptions: clOpts, - ClientID: azspn.config.ClientID, - TenantID: azspn.config.TenantID, - TokenFilePath: azspn.config.OAuthTokenFilePath, - }) + cred, err = azidentity.NewWorkloadIdentityCredential( + &azidentity.WorkloadIdentityCredentialOptions{ + ClientOptions: clOpts, + ClientID: azspn.config.ClientID, + TenantID: azspn.config.TenantID, + TokenFilePath: azspn.config.OAuthTokenFilePath, + }, + ) if err != nil { - log.Err("AzAuthSPN::getTokenCredential : Failed to generate token for SPN [%s]", err.Error()) + log.Err( + "AzAuthSPN::getTokenCredential : Failed to generate token for SPN [%s]", + err.Error(), + ) return nil, err } } else if azspn.config.WorkloadIdentityToken != "" { @@ -108,19 +113,28 @@ type azAuthBlobSPN struct { func (azspn *azAuthBlobSPN) getServiceClient(stConfig *AzStorageConfig) (any, error) { cred, err := azspn.getTokenCredential() if 
err != nil { - log.Err("azAuthBlobSPN::getServiceClient : Failed to get token credential from SPN [%s]", err.Error()) + log.Err( + "azAuthBlobSPN::getServiceClient : Failed to get token credential from SPN [%s]", + err.Error(), + ) return nil, err } opts, err := getAzBlobServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthBlobSPN::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthBlobSPN::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := service.NewClient(azspn.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthBlobSPN::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthBlobSPN::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err @@ -134,19 +148,28 @@ type azAuthDatalakeSPN struct { func (azspn *azAuthDatalakeSPN) getServiceClient(stConfig *AzStorageConfig) (any, error) { cred, err := azspn.getTokenCredential() if err != nil { - log.Err("azAuthDatalakeSPN::getServiceClient : Failed to get token credential from SPN [%s]", err.Error()) + log.Err( + "azAuthDatalakeSPN::getServiceClient : Failed to get token credential from SPN [%s]", + err.Error(), + ) return nil, err } opts, err := getAzDatalakeServiceClientOptions(stConfig) if err != nil { - log.Err("azAuthDatalakeSPN::getServiceClient : Failed to create client options [%s]", err.Error()) + log.Err( + "azAuthDatalakeSPN::getServiceClient : Failed to create client options [%s]", + err.Error(), + ) return nil, err } svcClient, err := serviceBfs.NewClient(azspn.config.Endpoint, cred, opts) if err != nil { - log.Err("azAuthDatalakeSPN::getServiceClient : Failed to create service client [%s]", err.Error()) + log.Err( + "azAuthDatalakeSPN::getServiceClient : Failed to create service client [%s]", + err.Error(), + ) } return svcClient, err diff --git a/component/azstorage/azstorage.go b/component/azstorage/azstorage.go index b17ef9e58e..7696ebeacc 100644 --- a/component/azstorage/azstorage.go +++ b/component/azstorage/azstorage.go @@ -106,7 +106,9 @@ reconfigure: // If user has not specified the account type then detect it's HNS or FNS if conf.AccountType == "" && !config.IsSet(compName+".use-adls") && az.storage.IsAccountADLS() { - log.Crit("AzStorage::Configure : Auto detected account type as adls, reconfiguring storage connection.") + log.Crit( + "AzStorage::Configure : Auto detected account type as adls, reconfiguring storage connection.", + ) az.storage = nil conf.AccountType = "adls" goto reconfigure @@ -168,8 +170,15 @@ func (az *AzStorage) configureAndTest(isParent bool) error { if isParent { err = az.storage.TestPipeline() if err != nil { - log.Err("AzStorage::configureAndTest : Failed to validate credentials [%s]", err.Error()) - return fmt.Errorf("failed to authenticate %s credentials with error [%s]", az.Name(), err.Error()) + log.Err( + "AzStorage::configureAndTest : Failed to validate credentials [%s]", + err.Error(), + ) + return fmt.Errorf( + "failed to authenticate %s credentials with error [%s]", + az.Name(), + err.Error(), + ) } } @@ -210,7 +219,11 @@ func (az *AzStorage) CreateDir(options internal.CreateDirOptions) error { err := az.storage.CreateDirectory(internal.TruncateDirName(options.Name)) if err == nil { - azStatsCollector.PushEvents(createDir, options.Name, map[string]any{mode: options.Mode.String()}) + azStatsCollector.PushEvents( + createDir, + options.Name, + map[string]any{mode: options.Mode.String()}, + 
) azStatsCollector.UpdateStats(stats_manager.Increment, createDir, (int64)(1)) } @@ -270,7 +283,7 @@ func (az *AzStorage) ReadDir(options internal.ReadDirOptions) ([]*internal.ObjAt } path := formatListDirName(options.Name) - var iteration int = 0 + var iteration = 0 var marker *string = nil for { new_list, new_marker, err := az.storage.List(path, marker, common.MaxDirListCount) @@ -282,7 +295,11 @@ func (az *AzStorage) ReadDir(options internal.ReadDirOptions) ([]*internal.ObjAt marker = new_marker iteration++ - log.Debug("AzStorage::ReadDir : So far retrieved %d objects in %d iterations", len(blobList), iteration) + log.Debug( + "AzStorage::ReadDir : So far retrieved %d objects in %d iterations", + len(blobList), + iteration, + ) if new_marker == nil || *new_marker == "" { break } @@ -291,8 +308,15 @@ func (az *AzStorage) ReadDir(options internal.ReadDirOptions) ([]*internal.ObjAt return blobList, nil } -func (az *AzStorage) StreamDir(options internal.StreamDirOptions) ([]*internal.ObjAttr, string, error) { - log.Trace("AzStorage::StreamDir : Path %s, offset %d, count %d", options.Name, options.Offset, options.Count) +func (az *AzStorage) StreamDir( + options internal.StreamDirOptions, +) ([]*internal.ObjAttr, string, error) { + log.Trace( + "AzStorage::StreamDir : Path %s, offset %d, count %d", + options.Name, + options.Offset, + options.Count, + ) if az.listBlocked { diff := time.Since(az.startTime) @@ -313,7 +337,12 @@ func (az *AzStorage) StreamDir(options internal.StreamDirOptions) ([]*internal.O return new_list, "", err } - log.Debug("AzStorage::StreamDir : Retrieved %d objects with %s marker for Path %s", len(new_list), options.Token, path) + log.Debug( + "AzStorage::StreamDir : Retrieved %d objects with %s marker for Path %s", + len(new_list), + options.Token, + path, + ) if new_marker == nil { new_marker = to.Ptr("") @@ -352,7 +381,11 @@ func (az *AzStorage) RenameDir(options internal.RenameDirOptions) error { err := az.storage.RenameDirectory(options.Src, options.Dst) if err == nil { - azStatsCollector.PushEvents(renameDir, options.Src, map[string]any{src: options.Src, dest: options.Dst}) + azStatsCollector.PushEvents( + renameDir, + options.Src, + map[string]any{src: options.Src, dest: options.Dst}, + ) azStatsCollector.UpdateStats(stats_manager.Increment, renameDir, (int64)(1)) } return err @@ -376,7 +409,11 @@ func (az *AzStorage) CreateFile(options internal.CreateFileOptions) (*handlemap. 
} handle.Mtime = time.Now() - azStatsCollector.PushEvents(createFile, options.Name, map[string]any{mode: options.Mode.String()}) + azStatsCollector.PushEvents( + createFile, + options.Name, + map[string]any{mode: options.Mode.String()}, + ) // increment open file handles count azStatsCollector.UpdateStats(stats_manager.Increment, openHandles, (int64)(1)) @@ -436,7 +473,11 @@ func (az *AzStorage) RenameFile(options internal.RenameFileOptions) error { err := az.storage.RenameFile(options.Src, options.Dst, options.SrcAttr) if err == nil { - azStatsCollector.PushEvents(renameFile, options.Src, map[string]any{src: options.Src, dest: options.Dst}) + azStatsCollector.PushEvents( + renameFile, + options.Src, + map[string]any{src: options.Src, dest: options.Dst}, + ) azStatsCollector.UpdateStats(stats_manager.Increment, renameFile, (int64)(1)) } return err @@ -468,7 +509,7 @@ func (az *AzStorage) ReadInBuffer(options *internal.ReadInBufferOptions) (length return 0, syscall.ERANGE } - var dataLen int64 = int64(len(options.Data)) + var dataLen = int64(len(options.Data)) if size < (options.Offset + int64(len(options.Data))) { dataLen = size - options.Offset } @@ -492,7 +533,9 @@ func (az *AzStorage) WriteFile(options *internal.WriteFileOptions) (int, error) return len(options.Data), err } -func (az *AzStorage) GetFileBlockOffsets(options internal.GetFileBlockOffsetsOptions) (*common.BlockOffsetList, error) { +func (az *AzStorage) GetFileBlockOffsets( + options internal.GetFileBlockOffsetsOptions, +) (*common.BlockOffsetList, error) { return az.storage.GetFileBlockOffsets(options.Name) } @@ -524,7 +567,11 @@ func (az *AzStorage) CreateLink(options internal.CreateLinkOptions) error { err := az.storage.CreateLink(options.Name, options.Target) if err == nil { - azStatsCollector.PushEvents(createLink, options.Name, map[string]any{target: options.Target}) + azStatsCollector.PushEvents( + createLink, + options.Name, + map[string]any{target: options.Target}, + ) azStatsCollector.UpdateStats(stats_manager.Increment, createLink, (int64)(1)) } @@ -554,7 +601,11 @@ func (az *AzStorage) Chmod(options internal.ChmodOptions) error { err := az.storage.ChangeMod(options.Name, options.Mode) if err == nil { - azStatsCollector.PushEvents(chmod, options.Name, map[string]any{mode: options.Mode.String()}) + azStatsCollector.PushEvents( + chmod, + options.Name, + map[string]any{mode: options.Mode.String()}, + ) azStatsCollector.UpdateStats(stats_manager.Increment, chmod, (int64)(1)) } @@ -562,7 +613,12 @@ func (az *AzStorage) Chmod(options internal.ChmodOptions) error { } func (az *AzStorage) Chown(options internal.ChownOptions) error { - log.Trace("AzStorage::Chown : Change ownership of file %s to %d-%d", options.Name, options.Owner, options.Group) + log.Trace( + "AzStorage::Chown : Change ownership of file %s to %d-%d", + options.Name, + options.Owner, + options.Group, + ) return az.storage.ChangeOwner(options.Name, options.Owner, options.Group) } @@ -616,22 +672,42 @@ func init() { internal.AddComponent(compName, NewazstorageComponent) RegisterEnvVariables() - useHttps := config.AddBoolFlag("use-https", true, "Enables HTTPS communication with Blob storage.") + useHttps := config.AddBoolFlag( + "use-https", + true, + "Enables HTTPS communication with Blob storage.", + ) config.BindPFlag(compName+".use-https", useHttps) useHttps.Hidden = true - blockListSecFlag := config.AddInt32Flag("cancel-list-on-mount-seconds", 0, "Number of seconds list call is blocked post mount") + blockListSecFlag := config.AddInt32Flag( + 
"cancel-list-on-mount-seconds", + 0, + "Number of seconds list call is blocked post mount", + ) config.BindPFlag(compName+".block-list-on-mount-sec", blockListSecFlag) blockListSecFlag.Hidden = true - containerNameFlag := config.AddStringFlag("container-name", "", "Configures the name of the container to be mounted") + containerNameFlag := config.AddStringFlag( + "container-name", + "", + "Configures the name of the container to be mounted", + ) config.BindPFlag(compName+".container", containerNameFlag) - useAdls := config.AddBoolFlag("use-adls", false, "Enables blobfuse to access Azure DataLake storage account.") + useAdls := config.AddBoolFlag( + "use-adls", + false, + "Enables blobfuse to access Azure DataLake storage account.", + ) config.BindPFlag(compName+".use-adls", useAdls) useAdls.Hidden = true - maxConcurrency := config.AddUint16Flag("max-concurrency", 32, "Option to override default number of concurrent storage connections") + maxConcurrency := config.AddUint16Flag( + "max-concurrency", + 32, + "Option to override default number of concurrent storage connections", + ) config.BindPFlag(compName+".max-concurrency", maxConcurrency) maxConcurrency.Hidden = true @@ -643,57 +719,108 @@ func init() { config.BindPFlag(compName+".https-proxy", httpsProxy) httpsProxy.Hidden = true - maxRetry := config.AddUint16Flag("max-retry", 3, "Maximum retry count if the failure codes are retryable.") + maxRetry := config.AddUint16Flag( + "max-retry", + 3, + "Maximum retry count if the failure codes are retryable.", + ) config.BindPFlag(compName+".max-retries", maxRetry) maxRetry.Hidden = true - maxRetryInterval := config.AddUint16Flag("max-retry-interval-in-seconds", 3, "Maximum number of seconds between 2 retries.") + maxRetryInterval := config.AddUint16Flag( + "max-retry-interval-in-seconds", + 3, + "Maximum number of seconds between 2 retries.", + ) config.BindPFlag(compName+".max-retry-timeout-sec", maxRetryInterval) maxRetryInterval.Hidden = true - retryDelayFactor := config.AddUint16Flag("retry-delay-factor", 1, "Retry delay between two tries") + retryDelayFactor := config.AddUint16Flag( + "retry-delay-factor", + 1, + "Retry delay between two tries", + ) config.BindPFlag(compName+".retry-backoff-sec", retryDelayFactor) retryDelayFactor.Hidden = true - setContentType := config.AddBoolFlag("set-content-type", true, "Turns on automatic 'content-type' property based on the file extension.") + setContentType := config.AddBoolFlag( + "set-content-type", + true, + "Turns on automatic 'content-type' property based on the file extension.", + ) config.BindPFlag(compName+".set-content-type", setContentType) setContentType.Hidden = true - caCertFile := config.AddStringFlag("ca-cert-file", "", "Specifies the proxy pem certificate path if its not in the default path.") + caCertFile := config.AddStringFlag( + "ca-cert-file", + "", + "Specifies the proxy pem certificate path if its not in the default path.", + ) config.BindPFlag(compName+".ca-cert-file", caCertFile) caCertFile.Hidden = true - debugLibcurl := config.AddStringFlag("debug-libcurl", "", "Flag to allow users to debug libcurl calls.") + debugLibcurl := config.AddStringFlag( + "debug-libcurl", + "", + "Flag to allow users to debug libcurl calls.", + ) config.BindPFlag(compName+".debug-libcurl", debugLibcurl) debugLibcurl.Hidden = true - virtualDir := config.AddBoolFlag("virtual-directory", false, "Support virtual directories without existence of a special marker blob.") + virtualDir := config.AddBoolFlag( + "virtual-directory", + false, + "Support 
virtual directories without existence of a special marker blob.", + ) config.BindPFlag(compName+".virtual-directory", virtualDir) - subDirectory := config.AddStringFlag("subdirectory", "", "Mount only this sub-directory from given container.") + subDirectory := config.AddStringFlag( + "subdirectory", + "", + "Mount only this sub-directory from given container.", + ) config.BindPFlag(compName+".subdirectory", subDirectory) - disableCompression := config.AddBoolFlag("disable-compression", false, "Disable transport layer compression.") + disableCompression := config.AddBoolFlag( + "disable-compression", + false, + "Disable transport layer compression.", + ) config.BindPFlag(compName+".disable-compression", disableCompression) telemetry := config.AddStringFlag("telemetry", "", "Additional telemetry information.") config.BindPFlag(compName+".telemetry", telemetry) telemetry.Hidden = true - honourACL := config.AddBoolFlag("honour-acl", false, "Match ObjectID in ACL against the one used for authentication.") + honourACL := config.AddBoolFlag( + "honour-acl", + false, + "Match ObjectID in ACL against the one used for authentication.", + ) config.BindPFlag(compName+".honour-acl", honourACL) honourACL.Hidden = true cpkEnabled := config.AddBoolFlag("cpk-enabled", false, "Enable client provided key.") config.BindPFlag(compName+".cpk-enabled", cpkEnabled) - preserveACL := config.AddBoolFlag("preserve-acl", false, "Preserve ACL and Permissions set on file during updates") + preserveACL := config.AddBoolFlag( + "preserve-acl", + false, + "Preserve ACL and Permissions set on file during updates", + ) config.BindPFlag(compName+".preserve-acl", preserveACL) - blobFilter := config.AddStringFlag("filter", "", "Filter string to match blobs. For details refer [https://github.com/Azure/azure-storage-fuse?tab=readme-ov-file#blob-filter]") + blobFilter := config.AddStringFlag( + "filter", + "", + "Filter string to match blobs. 
For details refer [https://github.com/Azure/azure-storage-fuse?tab=readme-ov-file#blob-filter]", + ) config.BindPFlag(compName+".filter", blobFilter) - config.RegisterFlagCompletionFunc("container-name", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveNoFileComp - }) + config.RegisterFlagCompletionFunc( + "container-name", + func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp + }, + ) } diff --git a/component/azstorage/block_blob.go b/component/azstorage/block_blob.go index 82b9c9e456..7db6994faf 100644 --- a/component/azstorage/block_blob.go +++ b/component/azstorage/block_blob.go @@ -129,7 +129,10 @@ func (bb *BlockBlob) UpdateServiceClient(key, value string) (err error) { // get the service client with updated SAS svcClient, err := bb.Auth.getServiceClient(&bb.Config) if err != nil { - log.Err("BlockBlob::UpdateServiceClient : Failed to get service client [%s]", err.Error()) + log.Err( + "BlockBlob::UpdateServiceClient : Failed to get service client [%s]", + err.Error(), + ) return err } @@ -199,22 +202,30 @@ func (bb *BlockBlob) TestPipeline() error { includeFields.Permissions = true } - listBlobPager := bb.Container.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{ - MaxResults: to.Ptr((int32)(2)), - Prefix: &bb.Config.prefixPath, - Include: includeFields, - }) + listBlobPager := bb.Container.NewListBlobsHierarchyPager( + "/", + &container.ListBlobsHierarchyOptions{ + MaxResults: to.Ptr((int32)(2)), + Prefix: &bb.Config.prefixPath, + Include: includeFields, + }, + ) // we are just validating the auth mode used. So, no need to iterate over the pages _, err := listBlobPager.NextPage(context.Background()) if err != nil { - log.Err("BlockBlob::TestPipeline : Failed to validate account with given auth %s", err.Error()) + log.Err( + "BlockBlob::TestPipeline : Failed to validate account with given auth %s", + err.Error(), + ) var respErr *azcore.ResponseError errors.As(err, &respErr) if respErr != nil { if respErr.ErrorCode == "InvalidQueryParameterValue" { // User explicitly mounting FNS account as HNS which is not supported - return fmt.Errorf("BlockBlob::TestPipeline : Detected FNS account being mounted as HNS") + return fmt.Errorf( + "BlockBlob::TestPipeline : Detected FNS account being mounted as HNS", + ) } return fmt.Errorf("BlockBlob::TestPipeline : [%s]", respErr.ErrorCode) } @@ -229,11 +240,14 @@ func (bb *BlockBlob) IsAccountADLS() bool { includeFields := bb.listDetails includeFields.Permissions = true // for FNS account this property will return back error - listBlobPager := bb.Container.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{ - MaxResults: to.Ptr((int32)(2)), - Prefix: &bb.Config.prefixPath, - Include: includeFields, - }) + listBlobPager := bb.Container.NewListBlobsHierarchyPager( + "/", + &container.ListBlobsHierarchyOptions{ + MaxResults: to.Ptr((int32)(2)), + Prefix: &bb.Config.prefixPath, + Include: includeFields, + }, + ) // we are just validating the auth mode used. 
So, no need to iterate over the pages _, err := listBlobPager.NextPage(context.Background())
@@ -254,7 +268,10 @@ func (bb *BlockBlob) IsAccountADLS() bool { } } - log.Crit("BlockBlob::IsAccountADLS : Unable to detect account type, assuming FNS [%s]", err.Error()) + log.Crit( + "BlockBlob::IsAccountADLS : Unable to detect account type, assuming FNS [%s]", + err.Error(), + ) return false }
@@ -320,13 +337,14 @@ func (bb *BlockBlob) DeleteFile(name string) (err error) { }) if err != nil { serr := storeBlobErrToErr(err) - if serr == ErrFileNotFound { + switch serr { + case ErrFileNotFound: log.Err("BlockBlob::DeleteFile : %s does not exist", name) return syscall.ENOENT - } else if serr == BlobIsUnderLease { + case BlobIsUnderLease: log.Err("BlockBlob::DeleteFile : %s is under lease [%s]", name, err.Error()) return syscall.EIO - } else { + default: log.Err("BlockBlob::DeleteFile : Failed to delete blob %s [%s]", name, err.Error()) return err }
@@ -362,9 +380,13 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. // not specifying source blob metadata, since passing empty metadata headers copies // the source blob metadata to destination blob - copyResponse, err := newBlobClient.StartCopyFromURL(context.Background(), blobClient.URL(), &blob.StartCopyFromURLOptions{ - Tier: bb.Config.defaultTier, - }) + copyResponse, err := newBlobClient.StartCopyFromURL( + context.Background(), + blobClient.URL(), + &blob.StartCopyFromURLOptions{ + Tier: bb.Config.defaultTier, + }, + ) if err != nil { serr := storeBlobErrToErr(err)
@@ -378,8 +400,8 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. return err } - var dstLMT *time.Time = copyResponse.LastModified - var dstETag string = sanitizeEtag(copyResponse.ETag) + var dstLMT = copyResponse.LastModified + var dstETag = sanitizeEtag(copyResponse.ETag) copyStatus := copyResponse.CopyStatus var prop blob.GetPropertiesResponse
@@ -391,7 +413,11 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. CPKInfo: bb.blobCPKOpt, }) if err != nil { - log.Err("BlockBlob::RenameFile : CopyStats : Failed to get blob properties for %s [%s]", source, err.Error()) + log.Err( + "BlockBlob::RenameFile : CopyStats : Failed to get blob properties for %s [%s]", + source, + err.Error(), + ) } copyStatus = prop.CopyStatus }
@@ -413,7 +439,12 @@ func (bb *BlockBlob) RenameFile(source string, target string, srcAttr *internal. // Sometimes backend is able to copy source file to destination but when we try to delete the // source files it returns back with ENOENT. If file was just created on backend it might happen // that it has not been synced yet at all layers and hence delete is not able to find the source file - log.Trace("BlockBlob::RenameFile : %s -> %s, unable to find source. Retrying %d", source, target, retry) + log.Trace( + "BlockBlob::RenameFile : %s -> %s, unable to find source. Retrying %d", + source, + target, + retry, + ) time.Sleep(1 * time.Second) err = bb.DeleteFile(source) }
@@ -448,7 +479,11 @@ func (bb *BlockBlob) RenameDirectory(source string, target string) error { srcPath := removePrefixPath(bb.Config.prefixPath, *blobInfo.Name) err = bb.RenameFile(srcPath, strings.Replace(srcPath, source, target, 1), nil) if err != nil { - log.Err("BlockBlob::RenameDirectory : Failed to rename file %s [%s]", srcPath, err.Error) + log.Err( + "BlockBlob::RenameDirectory : Failed to rename file %s [%s]", + srcPath, + err.Error(), + ) } } }
@@ -464,7 +499,10 @@ func (bb *BlockBlob) RenameDirectory(source string, target string) error { if srcDirPresent { //Some files exist inside the directory return nil } - log.Err("BlockBlob::RenameDirectory : %s marker blob does not exist and Src Directory doesn't Exist", source) + log.Err( + "BlockBlob::RenameDirectory : %s marker blob does not exist and Src Directory doesn't Exist", + source, + ) return syscall.ENOENT } else { log.Err("BlockBlob::RenameDirectory : Failed to get source directory marker blob properties for %s [%s]", source, err.Error()) }
@@ -485,13 +523,22 @@ func (bb *BlockBlob) getAttrUsingRest(name string) (attr *internal.ObjAttr, err if err != nil { serr := storeBlobErrToErr(err) - if serr == ErrFileNotFound { + switch serr { + case ErrFileNotFound: return attr, syscall.ENOENT - } else if serr == InvalidPermission { - log.Err("BlockBlob::getAttrUsingRest : Insufficient permissions for %s [%s]", name, err.Error()) + case InvalidPermission: + log.Err( + "BlockBlob::getAttrUsingRest : Insufficient permissions for %s [%s]", + name, + err.Error(), + ) return attr, syscall.EACCES - } else { - log.Err("BlockBlob::getAttrUsingRest : Failed to get blob properties for %s [%s]", name, err.Error()) + default: + log.Err( + "BlockBlob::getAttrUsingRest : Failed to get blob properties for %s [%s]", + name, + err.Error(), + ) return attr, err } }
@@ -531,13 +578,22 @@ func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err blobs, new_marker, err = bb.List(name, marker, bb.Config.maxResultsForList) if err != nil { e := storeBlobErrToErr(err) - if e == ErrFileNotFound { + switch e { + case ErrFileNotFound: return attr, syscall.ENOENT - } else if e == InvalidPermission { - log.Err("BlockBlob::getAttrUsingList : Insufficient permissions for %s [%s]", name, err.Error()) + case InvalidPermission: + log.Err( + "BlockBlob::getAttrUsingList : Insufficient permissions for %s [%s]", + name, + err.Error(), + ) return attr, syscall.EACCES - } else { - log.Warn("BlockBlob::getAttrUsingList : Failed to list blob properties for %s [%s]", name, err.Error()) + default: + log.Warn( + "BlockBlob::getAttrUsingList : Failed to list blob properties for %s [%s]", + name, + err.Error(), + ) } }
@@ -552,7 +608,11 @@ func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err iteration++ blobsRead += len(blobs) - log.Trace("BlockBlob::getAttrUsingList : So far retrieved %d objects in %d iterations", blobsRead, iteration) + log.Trace( + "BlockBlob::getAttrUsingList : So far retrieved %d objects in %d iterations", + blobsRead, + iteration, + ) if new_marker == nil || *new_marker == "" { break }
@@ -563,7 +623,11 @@ func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err return nil, syscall.ENOENT } - log.Err("BlockBlob::getAttrUsingList : Failed to list blob properties for %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::getAttrUsingList : Failed to list blob properties for %s [%s]", + name, 
+ err.Error(), + ) return nil, err } @@ -595,7 +659,11 @@ func (bb *BlockBlob) GetAttr(name string) (attr *internal.ObjAttr, err error) { // List : Get a list of blobs matching the given prefix // This fetches the list using a marker so the caller code should handle marker logic // If count=0 - fetch max entries -func (bb *BlockBlob) List(prefix string, marker *string, count int32) ([]*internal.ObjAttr, *string, error) { +func (bb *BlockBlob) List( + prefix string, + marker *string, + count int32, +) ([]*internal.ObjAttr, *string, error) { log.Trace("BlockBlob::List : prefix %s, marker %s", prefix, func(marker *string) string { if marker != nil { return *marker @@ -653,13 +721,16 @@ func (bb *BlockBlob) List(prefix string, marker *string, count int32) ([]*intern func (bb *BlockBlob) getListPath(prefix string) string { listPath := filepath.Join(bb.Config.prefixPath, prefix) - if (prefix != "" && prefix[len(prefix)-1] == '/') || (prefix == "" && bb.Config.prefixPath != "") { + if (prefix != "" && prefix[len(prefix)-1] == '/') || + (prefix == "" && bb.Config.prefixPath != "") { listPath += "/" } return listPath } -func (bb *BlockBlob) processBlobItems(blobItems []*container.BlobItem) ([]*internal.ObjAttr, map[string]bool, error) { +func (bb *BlockBlob) processBlobItems( + blobItems []*container.BlobItem, +) ([]*internal.ObjAttr, map[string]bool, error) { blobList := make([]*internal.ObjAttr, 0) // For some directories 0 byte meta file may not exists so just create a map to figure out such directories dirList := make(map[string]bool) @@ -696,28 +767,41 @@ func (bb *BlockBlob) processBlobItems(blobItems []*container.BlobItem) ([]*inter } func (bb *BlockBlob) getBlobAttr(blobInfo *container.BlobItem) (*internal.ObjAttr, error) { - if blobInfo.Properties.CustomerProvidedKeySHA256 != nil && *blobInfo.Properties.CustomerProvidedKeySHA256 != "" { - log.Trace("BlockBlob::List : blob is encrypted with customer provided key so fetching metadata explicitly using REST") + if blobInfo.Properties.CustomerProvidedKeySHA256 != nil && + *blobInfo.Properties.CustomerProvidedKeySHA256 != "" { + log.Trace( + "BlockBlob::List : blob is encrypted with customer provided key so fetching metadata explicitly using REST", + ) return bb.getAttrUsingRest(*blobInfo.Name) } mode, err := bb.getFileMode(blobInfo.Properties.Permissions) if err != nil { mode = 0 - log.Warn("BlockBlob::getBlobAttr : Failed to get file mode for %s [%s]", *blobInfo.Name, err.Error()) + log.Warn( + "BlockBlob::getBlobAttr : Failed to get file mode for %s [%s]", + *blobInfo.Name, + err.Error(), + ) } attr := &internal.ObjAttr{ - Path: removePrefixPath(bb.Config.prefixPath, *blobInfo.Name), - Name: filepath.Base(*blobInfo.Name), - Size: *blobInfo.Properties.ContentLength, - Mode: mode, - Mtime: *blobInfo.Properties.LastModified, - Atime: bb.dereferenceTime(blobInfo.Properties.LastAccessedOn, *blobInfo.Properties.LastModified), - Ctime: *blobInfo.Properties.LastModified, - Crtime: bb.dereferenceTime(blobInfo.Properties.CreationTime, *blobInfo.Properties.LastModified), - Flags: internal.NewFileBitMap(), - MD5: blobInfo.Properties.ContentMD5, - ETag: sanitizeEtag(blobInfo.Properties.ETag), + Path: removePrefixPath(bb.Config.prefixPath, *blobInfo.Name), + Name: filepath.Base(*blobInfo.Name), + Size: *blobInfo.Properties.ContentLength, + Mode: mode, + Mtime: *blobInfo.Properties.LastModified, + Atime: bb.dereferenceTime( + blobInfo.Properties.LastAccessedOn, + *blobInfo.Properties.LastModified, + ), + Ctime: *blobInfo.Properties.LastModified, + Crtime: 
bb.dereferenceTime( + blobInfo.Properties.CreationTime, + *blobInfo.Properties.LastModified, + ), + Flags: internal.NewFileBitMap(), + MD5: blobInfo.Properties.ContentMD5, + ETag: sanitizeEtag(blobInfo.Properties.ETag), } parseMetadata(attr, blobInfo.Metadata) @@ -743,7 +827,11 @@ func (bb *BlockBlob) dereferenceTime(input *time.Time, defaultTime time.Time) ti return *input } -func (bb *BlockBlob) processBlobPrefixes(blobPrefixes []*container.BlobPrefix, dirList map[string]bool, blobList *[]*internal.ObjAttr) error { +func (bb *BlockBlob) processBlobPrefixes( + blobPrefixes []*container.BlobPrefix, + dirList map[string]bool, + blobList *[]*internal.ObjAttr, +) error { for _, blobInfo := range blobPrefixes { if _, ok := dirList[*blobInfo.Name]; ok { // marker file found in current iteration, skip adding the directory @@ -798,7 +886,9 @@ func (bb *BlockBlob) createDirAttr(name string) *internal.ObjAttr { return attr } -func (bb *BlockBlob) createDirAttrWithPermissions(blobInfo *container.BlobPrefix) (*internal.ObjAttr, error) { +func (bb *BlockBlob) createDirAttrWithPermissions( + blobInfo *container.BlobPrefix, +) (*internal.ObjAttr, error) { if blobInfo.Properties == nil { return nil, fmt.Errorf("failed to get properties of blobprefix %s", *blobInfo.Name) } @@ -806,20 +896,30 @@ func (bb *BlockBlob) createDirAttrWithPermissions(blobInfo *container.BlobPrefix mode, err := bb.getFileMode(blobInfo.Properties.Permissions) if err != nil { mode = 0 - log.Warn("BlockBlob::createDirAttrWithPermissions : Failed to get file mode for %s [%s]", *blobInfo.Name, err.Error()) + log.Warn( + "BlockBlob::createDirAttrWithPermissions : Failed to get file mode for %s [%s]", + *blobInfo.Name, + err.Error(), + ) } name := strings.TrimSuffix(*blobInfo.Name, "/") attr := &internal.ObjAttr{ - Path: removePrefixPath(bb.Config.prefixPath, name), - Name: filepath.Base(name), - Size: *blobInfo.Properties.ContentLength, - Mode: mode, - Mtime: *blobInfo.Properties.LastModified, - Atime: bb.dereferenceTime(blobInfo.Properties.LastAccessedOn, *blobInfo.Properties.LastModified), - Ctime: *blobInfo.Properties.LastModified, - Crtime: bb.dereferenceTime(blobInfo.Properties.CreationTime, *blobInfo.Properties.LastModified), - Flags: internal.NewDirBitMap(), + Path: removePrefixPath(bb.Config.prefixPath, name), + Name: filepath.Base(name), + Size: *blobInfo.Properties.ContentLength, + Mode: mode, + Mtime: *blobInfo.Properties.LastModified, + Atime: bb.dereferenceTime( + blobInfo.Properties.LastAccessedOn, + *blobInfo.Properties.LastModified, + ), + Ctime: *blobInfo.Properties.LastModified, + Crtime: bb.dereferenceTime( + blobInfo.Properties.CreationTime, + *blobInfo.Properties.LastModified, + ), + Flags: internal.NewDirBitMap(), } return attr, nil @@ -829,10 +929,19 @@ func (bb *BlockBlob) createDirAttrWithPermissions(blobInfo *container.BlobPrefix func trackDownload(name string, bytesTransferred int64, count int64, downloadPtr *int64) { if bytesTransferred >= (*downloadPtr)*100*common.MbToBytes || bytesTransferred == count { (*downloadPtr)++ - log.Debug("BlockBlob::trackDownload : Download: Blob = %v, Bytes transferred = %v, Size = %v", name, bytesTransferred, count) + log.Debug( + "BlockBlob::trackDownload : Download: Blob = %v, Bytes transferred = %v, Size = %v", + name, + bytesTransferred, + count, + ) // send the download progress as an event - azStatsCollector.PushEvents(downloadProgress, name, map[string]any{bytesTfrd: bytesTransferred, size: count}) + azStatsCollector.PushEvents( + downloadProgress, + name, + 
map[string]any{bytesTfrd: bytesTransferred, size: count}, + ) } }
@@ -907,33 +1016,34 @@ func (bb *BlockBlob) ReadToFile(name string, offset int64, count int64, fi *os.F } // ReadBuffer : Download a specific range from a blob to a buffer -func (bb *BlockBlob) ReadBuffer(name string, offset int64, len int64) ([]byte, error) { - log.Trace("BlockBlob::ReadBuffer : name %s, offset %v, len %v", name, offset, len) +func (bb *BlockBlob) ReadBuffer(name string, offset int64, length int64) ([]byte, error) { + log.Trace("BlockBlob::ReadBuffer : name %s, offset %v, len %v", name, offset, length) var buff []byte - if len == 0 { + if length == 0 { attr, err := bb.GetAttr(name) if err != nil { return buff, err } - len = attr.Size - offset + length = attr.Size - offset } - buff = make([]byte, len) + buff = make([]byte, length) blobClient := bb.Container.NewBlobClient(filepath.Join(bb.Config.prefixPath, name)) dlOpts := (blob.DownloadBufferOptions)(*bb.downloadOptions) dlOpts.Range = blob.HTTPRange{ Offset: offset, - Count: len, + Count: length, } _, err := blobClient.DownloadBuffer(context.Background(), buff, &dlOpts) if err != nil { e := storeBlobErrToErr(err) - if e == ErrFileNotFound { + switch e { + case ErrFileNotFound: return buff, syscall.ENOENT - } else if e == InvalidRange { + case InvalidRange: return buff, syscall.ERANGE }
@@ -945,7 +1055,13 @@ func (bb *BlockBlob) ReadBuffer(name string, offset int64, len int64) ([]byte, e } // ReadInBuffer : Download specific range from a file to a user provided buffer -func (bb *BlockBlob) ReadInBuffer(name string, offset int64, len int64, data []byte, etag *string) error { +func (bb *BlockBlob) ReadInBuffer( + name string, + offset int64, + length int64, + data []byte, + etag *string, +) error { // log.Trace("BlockBlob::ReadInBuffer : name %s", name) if etag != nil { *etag = "" }
@@ -959,7 +1075,7 @@ func (bb *BlockBlob) ReadInBuffer(name string, offset int64, len int64, data []b opt := &blob.DownloadStreamOptions{ Range: blob.HTTPRange{ Offset: offset, - Count: len, + Count: length, }, CPKInfo: bb.blobCPKOpt, }
@@ -968,13 +1084,18 @@ func (bb *BlockBlob) ReadInBuffer(name string, offset int64, len int64, data []b if err != nil { e := storeBlobErrToErr(err) - if e == ErrFileNotFound { + switch e { + case ErrFileNotFound: return syscall.ENOENT - } else if e == InvalidRange { + case InvalidRange: return syscall.ERANGE } - log.Err("BlockBlob::ReadInBufferWithETag : Failed to download blob %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::ReadInBufferWithETag : Failed to download blob %s [%s]", + name, + err.Error(), + ) return err }
@@ -982,18 +1103,29 @@ func (bb *BlockBlob) ReadInBuffer(name string, offset int64, len int64, data []b dataRead, err := io.ReadFull(streamBody, data) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - log.Err("BlockBlob::ReadInBuffer : Failed to copy data from body to buffer for blob %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::ReadInBuffer : Failed to copy data from body to buffer for blob %s [%s]", + name, + err.Error(), + ) return err } if dataRead < 0 { - log.Err("BlockBlob::ReadInBuffer : Failed to copy data from body to buffer for blob %s", name) + log.Err( + "BlockBlob::ReadInBuffer : Failed to copy data from body to buffer for blob %s", + name, + ) return errors.New("failed to copy data from body to buffer") } err = streamBody.Close() if err != nil { - log.Err("BlockBlob::ReadInBuffer : Failed to close body for blob %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::ReadInBuffer : Failed to close body for blob %s [%s]", + name, + err.Error(), + ) } if etag != nil {
@@ -1006,7 +1138,10 @@ func (bb *BlockBlob) calculateBlockSize(name string, fileSize int64) (blockSize int64, err error) { // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error if fileSize > MaxBlobSize { - log.Err("BlockBlob::calculateBlockSize : buffer is too large to upload to a block blob %s", name) + log.Err( + "BlockBlob::calculateBlockSize : buffer is too large to upload to a block blob %s", + name, + ) err = errors.New("buffer is too large to upload to a block blob") return 0, err }
@@ -1046,15 +1181,28 @@ func (bb *BlockBlob) calculateBlockSize(name string, fileSize int64) (blockSize func trackUpload(name string, bytesTransferred int64, count int64, uploadPtr *int64) { if bytesTransferred >= (*uploadPtr)*100*common.MbToBytes || bytesTransferred == count { (*uploadPtr)++ - log.Debug("BlockBlob::trackUpload : Upload: Blob = %v, Bytes transferred = %v, Size = %v", name, bytesTransferred, count) + log.Debug( + "BlockBlob::trackUpload : Upload: Blob = %v, Bytes transferred = %v, Size = %v", + name, + bytesTransferred, + count, + ) // send upload progress as event - azStatsCollector.PushEvents(uploadProgress, name, map[string]any{bytesTfrd: bytesTransferred, size: count}) + azStatsCollector.PushEvents( + uploadProgress, + name, + map[string]any{bytesTfrd: bytesTransferred, size: count}, + ) } } // WriteFromFile : Upload local file to blob -func (bb *BlockBlob) WriteFromFile(name string, metadata map[string]*string, fi *os.File) (err error) { +func (bb *BlockBlob) WriteFromFile( + name string, + metadata map[string]*string, + fi *os.File, +) (err error) { log.Trace("BlockBlob::WriteFromFile : name %s", name) //defer exectime.StatTimeCurrentBlock("WriteFromFile::WriteFromFile")()
@@ -1114,13 +1262,22 @@ func (bb *BlockBlob) WriteFromFile(name string, metadata map[string]*string, fi if err != nil { serr := storeBlobErrToErr(err) - if serr == BlobIsUnderLease { - log.Err("BlockBlob::WriteFromFile : %s is under a lease, can not update file [%s]", name, err.Error()) + switch serr { + case BlobIsUnderLease: + log.Err( + "BlockBlob::WriteFromFile : %s is under a lease, can not update file [%s]", + name, + err.Error(), + ) return syscall.EIO - } else if serr == InvalidPermission { - log.Err("BlockBlob::WriteFromFile : Insufficient permissions for %s [%s]", name, err.Error()) + case InvalidPermission: + log.Err( + "BlockBlob::WriteFromFile : Insufficient permissions for %s [%s]", + name, + err.Error(), + ) return syscall.EACCES - } else { + default: log.Err("BlockBlob::WriteFromFile : Failed to upload blob %s [%s]", name, err.Error()) } return err
@@ -1168,7 +1325,11 @@ func (bb *BlockBlob) GetFileBlockOffsets(name string) (*common.BlockOffsetList, blockList := common.BlockOffsetList{} blobClient := bb.Container.NewBlockBlobClient(filepath.Join(bb.Config.prefixPath, name)) - storageBlockList, err := blobClient.GetBlockList(context.Background(), blockblob.BlockListTypeCommitted, nil) + storageBlockList, err := blobClient.GetBlockList( + context.Background(), + blockblob.BlockListTypeCommitted, + nil, + ) if err != nil { log.Err("BlockBlob::GetFileBlockOffsets : Failed to get block list %s ", name, err.Error())
@@ -1209,14 +1370,21 @@ func (bb *BlockBlob) createBlock(blockIdLength, startIndex, size int64) *common.
} // create new blocks based on the offset and total length we're adding to the file -func (bb *BlockBlob) createNewBlocks(blockList *common.BlockOffsetList, offset, length int64) (int64, error) { +func (bb *BlockBlob) createNewBlocks( + blockList *common.BlockOffsetList, + offset, length int64, +) (int64, error) { blockSize := bb.Config.blockSize prevIndex := blockList.BlockList[len(blockList.BlockList)-1].EndIndex numOfBlocks := int64(len(blockList.BlockList)) if blockSize == 0 { blockSize = (16 * 1024 * 1024) - if math.Ceil((float64)(numOfBlocks)+(float64)(length)/(float64)(blockSize)) > blockblob.MaxBlocks { - blockSize = int64(math.Ceil((float64)(length) / (float64)(blockblob.MaxBlocks-numOfBlocks))) + if math.Ceil( + (float64)(numOfBlocks)+(float64)(length)/(float64)(blockSize), + ) > blockblob.MaxBlocks { + blockSize = int64( + math.Ceil((float64)(length) / (float64)(blockblob.MaxBlocks-numOfBlocks)), + ) if blockSize > blockblob.MaxStageBlockBytes { return 0, errors.New("cannot accommodate data within the block limit") } @@ -1237,7 +1405,11 @@ func (bb *BlockBlob) createNewBlocks(blockList *common.BlockOffsetList, offset, return bufferSize, nil } -func (bb *BlockBlob) removeBlocks(blockList *common.BlockOffsetList, size int64, name string) *common.BlockOffsetList { +func (bb *BlockBlob) removeBlocks( + blockList *common.BlockOffsetList, + size int64, + name string, +) *common.BlockOffsetList { _, index := blockList.BinarySearch(size) // if the start index is equal to new size - block should be removed - move one index back if blockList.BlockList[index].StartIndex == size { @@ -1267,7 +1439,11 @@ func (bb *BlockBlob) TruncateFile(name string, size int64) error { // log.Trace("BlockBlob::TruncateFile : name=%s, size=%d", name, size) attr, err := bb.GetAttr(name) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to get attributes of file %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::TruncateFile : Failed to get attributes of file %s [%s]", + name, + err.Error(), + ) if err == syscall.ENOENT { return err } @@ -1298,7 +1474,11 @@ func (bb *BlockBlob) TruncateFile(name string, size int64) error { CPKInfo: bb.blobCPKOpt, }) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to stage block for %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::TruncateFile : Failed to stage block for %s [%s]", + name, + err.Error(), + ) return err } } @@ -1308,7 +1488,11 @@ func (bb *BlockBlob) TruncateFile(name string, size int64) error { err = bb.CommitBlocks(blobName, blkList, nil) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to commit blocks for %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::TruncateFile : Failed to commit blocks for %s [%s]", + name, + err.Error(), + ) return err } } else { @@ -1329,7 +1513,11 @@ func (bb *BlockBlob) TruncateFile(name string, size int64) error { } err = bb.WriteFromBuffer(name, nil, data) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to write from buffer file %s", name, err.Error()) + log.Err( + "BlockBlob::TruncateFile : Failed to write from buffer file %s", + name, + err.Error(), + ) return err } } else { @@ -1469,22 +1657,35 @@ func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { } // TODO: make a similar method facing stream that would enable us to write to cached blocks then stage and commit -func (bb *BlockBlob) stageAndCommitModifiedBlocks(name string, data []byte, offsetList *common.BlockOffsetList) error { +func (bb *BlockBlob) stageAndCommitModifiedBlocks( + name string, + 
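
Note: createNewBlocks sizes blocks defensively: start from a 16 MiB default, and only grow the block size when the write would otherwise exceed the service's per-blob block limit; if even the largest stageable block cannot fit the data, fail. The same arithmetic as a standalone sketch, with constants that mirror the SDK's blockblob limits (50,000 blocks per blob, 4000 MiB per staged block):

// blocksize_sketch.go — standalone arithmetic, assuming those SDK limits.
package main

import (
	"errors"
	"fmt"
	"math"
)

const (
	maxBlocks          = 50_000             // blockblob.MaxBlocks
	maxStageBlockBytes = 4000 * 1024 * 1024 // blockblob.MaxStageBlockBytes
	defaultBlock       = 16 * 1024 * 1024
)

func pickBlockSize(existingBlocks, length int64) (int64, error) {
	blockSize := int64(defaultBlock)
	if math.Ceil(float64(existingBlocks)+float64(length)/float64(blockSize)) > maxBlocks {
		// Spread the remaining bytes across the blocks we have left.
		blockSize = int64(math.Ceil(float64(length) / float64(maxBlocks-existingBlocks)))
		if blockSize > maxStageBlockBytes {
			return 0, errors.New("cannot accommodate data within the block limit")
		}
	}
	return blockSize, nil
}

func main() {
	bs, _ := pickBlockSize(10, 1<<30) // append 1 GiB to a 10-block blob
	fmt.Println(bs)                   // stays at the 16 MiB default
}
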
data []byte, + offsetList *common.BlockOffsetList, +) error { blobClient := bb.Container.NewBlockBlobClient(filepath.Join(bb.Config.prefixPath, name)) blockOffset := int64(0) var blockIDList []string for _, blk := range offsetList.BlockList { blockIDList = append(blockIDList, blk.Id) if blk.Dirty() { - _, err := blobClient.StageBlock(context.Background(), + _, err := blobClient.StageBlock( + context.Background(), blk.Id, - streaming.NopCloser(bytes.NewReader(data[blockOffset:(blk.EndIndex-blk.StartIndex)+blockOffset])), + streaming.NopCloser( + bytes.NewReader(data[blockOffset:(blk.EndIndex-blk.StartIndex)+blockOffset]), + ), &blockblob.StageBlockOptions{ CPKInfo: bb.blobCPKOpt, - }) + }, + ) if err != nil { - log.Err("BlockBlob::stageAndCommitModifiedBlocks : Failed to stage to blob %s at block %v [%s]", name, blockOffset, err.Error()) + log.Err( + "BlockBlob::stageAndCommitModifiedBlocks : Failed to stage to blob %s at block %v [%s]", + name, + blockOffset, + err.Error(), + ) return err } blockOffset = (blk.EndIndex - blk.StartIndex) + blockOffset @@ -1501,7 +1702,11 @@ func (bb *BlockBlob) stageAndCommitModifiedBlocks(name string, data []byte, offs }) if err != nil { - log.Err("BlockBlob::stageAndCommitModifiedBlocks : Failed to commit block list to blob %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::stageAndCommitModifiedBlocks : Failed to commit block list to blob %s [%s]", + name, + err.Error(), + ) return err } return nil @@ -1532,7 +1737,13 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er CPKInfo: bb.blobCPKOpt, }) if err != nil { - log.Err("BlockBlob::StageAndCommit : Failed to stage to blob %s with ID %s at block %v [%s]", name, blk.Id, blk.StartIndex, err.Error()) + log.Err( + "BlockBlob::StageAndCommit : Failed to stage to blob %s with ID %s at block %v [%s]", + name, + blk.Id, + blk.StartIndex, + err.Error(), + ) return err } staged = true @@ -1553,7 +1764,11 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er // AccessConditions: &blob.AccessConditions{ModifiedAccessConditions: &blob.ModifiedAccessConditions{IfMatch: bol.Etag}}, }) if err != nil { - log.Err("BlockBlob::StageAndCommit : Failed to commit block list to blob %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::StageAndCommit : Failed to commit block list to blob %s [%s]", + name, + err.Error(), + ) return err } // update the etag @@ -1594,7 +1809,11 @@ func (bb *BlockBlob) ChangeOwner(name string, _ int, _ int) error { func (bb *BlockBlob) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) { blobClient := bb.Container.NewBlockBlobClient(filepath.Join(bb.Config.prefixPath, name)) - storageBlockList, err := blobClient.GetBlockList(context.Background(), blockblob.BlockListTypeCommitted, nil) + storageBlockList, err := blobClient.GetBlockList( + context.Background(), + blockblob.BlockListTypeCommitted, + nil, + ) if err != nil { log.Err("BlockBlob::GetFileBlockOffsets : Failed to get block list %s ", name, err.Error()) @@ -1637,7 +1856,12 @@ func (bb *BlockBlob) StageBlock(name string, data []byte, id string) error { }) if err != nil { - log.Err("BlockBlob::StageBlock : Failed to stage to blob %s with ID %s [%s]", name, id, err.Error()) + log.Err( + "BlockBlob::StageBlock : Failed to stage to blob %s with ID %s [%s]", + name, + id, + err.Error(), + ) return err } @@ -1663,7 +1887,11 @@ func (bb *BlockBlob) CommitBlocks(name string, blockList []string, newEtag *stri }) if err != nil { - log.Err("BlockBlob::CommitBlocks : 
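
Note: stageAndCommitModifiedBlocks and StageAndCommit both follow the service's two-phase write: stage only the dirty ranges, then commit the complete ordered block-ID list so untouched committed blocks are carried forward. A compilable sketch against the real blockblob client — the helper type and function are mine, not the blobfuse2 code:

// stagecommit_sketch.go — assumes the Azure SDK packages named in the imports.
package sketch

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

type block struct {
	ID    string // base64 block ID; all IDs in one blob must decode to equal lengths
	Data  []byte // nil when the block is clean (already committed)
	Dirty bool
}

func stageAndCommit(ctx context.Context, client *blockblob.Client, blocks []block) error {
	ids := make([]string, 0, len(blocks))
	for _, b := range blocks {
		ids = append(ids, b.ID)
		if !b.Dirty {
			continue // clean blocks are referenced by ID only
		}
		body := streaming.NopCloser(bytes.NewReader(b.Data))
		if _, err := client.StageBlock(ctx, b.ID, body, nil); err != nil {
			return err
		}
	}
	// The commit atomically replaces the blob's block list.
	_, err := client.CommitBlockList(ctx, ids, nil)
	return err
}
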
Failed to commit block list to blob %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::CommitBlocks : Failed to commit block list to blob %s [%s]", + name, + err.Error(), + ) return err } diff --git a/component/azstorage/config.go b/component/azstorage/config.go index fa8e9cb8e9..80c9af9ae2 100644 --- a/component/azstorage/config.go +++ b/component/azstorage/config.go @@ -152,55 +152,55 @@ const ( ) type AzStorageOptions struct { - AccountType string `config:"type" yaml:"type,omitempty"` - UseHTTP bool `config:"use-http" yaml:"use-http,omitempty"` - AccountName string `config:"account-name" yaml:"account-name,omitempty"` - AccountKey string `config:"account-key" yaml:"account-key,omitempty"` - SaSKey string `config:"sas" yaml:"sas,omitempty"` - ApplicationID string `config:"appid" yaml:"appid,omitempty"` - ResourceID string `config:"resid" yaml:"resid,omitempty"` - ObjectID string `config:"objid" yaml:"objid,omitempty"` - TenantID string `config:"tenantid" yaml:"tenantid,omitempty"` - ClientID string `config:"clientid" yaml:"clientid,omitempty"` - ClientSecret string `config:"clientsecret" yaml:"clientsecret,omitempty"` - OAuthTokenFilePath string `config:"oauth-token-path" yaml:"oauth-token-path,omitempty"` - WorkloadIdentityToken string `config:"workload-identity-token" yaml:"workload-identity-token,omitempty"` - ActiveDirectoryEndpoint string `config:"aadendpoint" yaml:"aadendpoint,omitempty"` - Endpoint string `config:"endpoint" yaml:"endpoint,omitempty"` - AuthMode string `config:"mode" yaml:"mode,omitempty"` - Container string `config:"container" yaml:"container,omitempty"` - PrefixPath string `config:"subdirectory" yaml:"subdirectory,omitempty"` - BlockSize int64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` - MaxConcurrency uint16 `config:"max-concurrency" yaml:"max-concurrency,omitempty"` - DefaultTier string `config:"tier" yaml:"tier,omitempty"` - CancelListForSeconds uint16 `config:"block-list-on-mount-sec" yaml:"block-list-on-mount-sec,omitempty"` - MaxRetries int32 `config:"max-retries" yaml:"max-retries,omitempty"` - MaxTimeout int32 `config:"max-retry-timeout-sec" yaml:"max-retry-timeout-sec,omitempty"` - BackoffTime int32 `config:"retry-backoff-sec" yaml:"retry-backoff-sec,omitempty"` - MaxRetryDelay int32 `config:"max-retry-delay-sec" yaml:"max-retry-delay-sec,omitempty"` - HttpProxyAddress string `config:"http-proxy" yaml:"http-proxy,omitempty"` - HttpsProxyAddress string `config:"https-proxy" yaml:"https-proxy,omitempty"` - FailUnsupportedOp bool `config:"fail-unsupported-op" yaml:"fail-unsupported-op,omitempty"` - AuthResourceString string `config:"auth-resource" yaml:"auth-resource,omitempty"` - UpdateMD5 bool `config:"update-md5" yaml:"update-md5"` - ValidateMD5 bool `config:"validate-md5" yaml:"validate-md5"` - VirtualDirectory bool `config:"virtual-directory" yaml:"virtual-directory"` - MaxResultsForList int32 `config:"max-results-for-list" yaml:"max-results-for-list"` - DisableCompression bool `config:"disable-compression" yaml:"disable-compression"` - Telemetry string `config:"telemetry" yaml:"telemetry"` - HonourACL bool `config:"honour-acl" yaml:"honour-acl"` - CPKEnabled bool `config:"cpk-enabled" yaml:"cpk-enabled"` - CPKEncryptionKey string `config:"cpk-encryption-key" yaml:"cpk-encryption-key"` + AccountType string `config:"type" yaml:"type,omitempty"` + UseHTTP bool `config:"use-http" yaml:"use-http,omitempty"` + AccountName string `config:"account-name" yaml:"account-name,omitempty"` + AccountKey string `config:"account-key" 
yaml:"account-key,omitempty"` + SaSKey string `config:"sas" yaml:"sas,omitempty"` + ApplicationID string `config:"appid" yaml:"appid,omitempty"` + ResourceID string `config:"resid" yaml:"resid,omitempty"` + ObjectID string `config:"objid" yaml:"objid,omitempty"` + TenantID string `config:"tenantid" yaml:"tenantid,omitempty"` + ClientID string `config:"clientid" yaml:"clientid,omitempty"` + ClientSecret string `config:"clientsecret" yaml:"clientsecret,omitempty"` + OAuthTokenFilePath string `config:"oauth-token-path" yaml:"oauth-token-path,omitempty"` + WorkloadIdentityToken string `config:"workload-identity-token" yaml:"workload-identity-token,omitempty"` + ActiveDirectoryEndpoint string `config:"aadendpoint" yaml:"aadendpoint,omitempty"` + Endpoint string `config:"endpoint" yaml:"endpoint,omitempty"` + AuthMode string `config:"mode" yaml:"mode,omitempty"` + Container string `config:"container" yaml:"container,omitempty"` + PrefixPath string `config:"subdirectory" yaml:"subdirectory,omitempty"` + BlockSize int64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` + MaxConcurrency uint16 `config:"max-concurrency" yaml:"max-concurrency,omitempty"` + DefaultTier string `config:"tier" yaml:"tier,omitempty"` + CancelListForSeconds uint16 `config:"block-list-on-mount-sec" yaml:"block-list-on-mount-sec,omitempty"` + MaxRetries int32 `config:"max-retries" yaml:"max-retries,omitempty"` + MaxTimeout int32 `config:"max-retry-timeout-sec" yaml:"max-retry-timeout-sec,omitempty"` + BackoffTime int32 `config:"retry-backoff-sec" yaml:"retry-backoff-sec,omitempty"` + MaxRetryDelay int32 `config:"max-retry-delay-sec" yaml:"max-retry-delay-sec,omitempty"` + HttpProxyAddress string `config:"http-proxy" yaml:"http-proxy,omitempty"` + HttpsProxyAddress string `config:"https-proxy" yaml:"https-proxy,omitempty"` + FailUnsupportedOp bool `config:"fail-unsupported-op" yaml:"fail-unsupported-op,omitempty"` + AuthResourceString string `config:"auth-resource" yaml:"auth-resource,omitempty"` + UpdateMD5 bool `config:"update-md5" yaml:"update-md5"` + ValidateMD5 bool `config:"validate-md5" yaml:"validate-md5"` + VirtualDirectory bool `config:"virtual-directory" yaml:"virtual-directory"` + MaxResultsForList int32 `config:"max-results-for-list" yaml:"max-results-for-list"` + DisableCompression bool `config:"disable-compression" yaml:"disable-compression"` + Telemetry string `config:"telemetry" yaml:"telemetry"` + HonourACL bool `config:"honour-acl" yaml:"honour-acl"` + CPKEnabled bool `config:"cpk-enabled" yaml:"cpk-enabled"` + CPKEncryptionKey string `config:"cpk-encryption-key" yaml:"cpk-encryption-key"` CPKEncryptionKeySha256 string `config:"cpk-encryption-key-sha256" yaml:"cpk-encryption-key-sha256"` - PreserveACL bool `config:"preserve-acl" yaml:"preserve-acl"` - Filter string `config:"filter" yaml:"filter"` - UserAssertion string `config:"user-assertion" yaml:"user-assertions"` + PreserveACL bool `config:"preserve-acl" yaml:"preserve-acl"` + Filter string `config:"filter" yaml:"filter"` + UserAssertion string `config:"user-assertion" yaml:"user-assertions"` // v1 support - UseAdls bool `config:"use-adls" yaml:"-"` - UseHTTPS bool `config:"use-https" yaml:"-"` + UseAdls bool `config:"use-adls" yaml:"-"` + UseHTTPS bool `config:"use-https" yaml:"-"` SetContentType bool `config:"set-content-type" yaml:"-"` - CaCertFile string `config:"ca-cert-file" yaml:"-"` + CaCertFile string `config:"ca-cert-file" yaml:"-"` } // RegisterEnvVariables : Register environment varilables @@ -248,8 +248,8 @@ func 
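
Note: the AzStorageOptions hunk is whitespace-only — gofmt aligns the name, type, and tag columns within each contiguous field block, so adding one long field (here the identity-related options) re-pads its neighbours. In miniature:

// tagalign_sketch.go — illustrative only.
package sketch

// Before gofmt: ragged tag column.
type optsBefore struct {
	AccountName string `config:"account-name"`
	UseHTTP bool `config:"use-http"`
}

// After gofmt: names, types, and tags each aligned within the block.
type optsAfter struct {
	AccountName string `config:"account-name"`
	UseHTTP     bool   `config:"use-http"`
}
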
formatEndpointProtocol(endpoint string, http bool) string { // If the pvtEndpoint does not have protocol mentioned in front, pvtEndpoint parsing will fail while // creating URI also the string shall end with "/" if correctedEndpoint != "" { - if !(strings.HasPrefix(correctedEndpoint, "https://") || - strings.HasPrefix(correctedEndpoint, "http://")) { + if !strings.HasPrefix(correctedEndpoint, "https://") && + !strings.HasPrefix(correctedEndpoint, "http://") { if http { correctedEndpoint = "http://" + correctedEndpoint } else { @@ -294,7 +294,9 @@ func validateMsiConfig(opt AzStorageOptions) error { v[opt.ResourceID] = true } if len(v) > 1 { - return errors.New("client ID, object ID and MSI resource ID are mutually exclusive and zero or one of the inputs need to be provided") + return errors.New( + "client ID, object ID and MSI resource ID are mutually exclusive and zero or one of the inputs need to be provided", + ) } return nil } @@ -338,7 +340,10 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { if opt.BlockSize != 0 { if opt.BlockSize > blockblob.MaxStageBlockBytes { - log.Err("ParseAndValidateConfig : Block size is too large. Block size has to be smaller than %s Bytes", blockblob.MaxStageBlockBytes) + log.Err( + "ParseAndValidateConfig : Block size is too large. Block size has to be smaller than %s Bytes", + blockblob.MaxStageBlockBytes, + ) return errors.New("block size is too large") } az.stConfig.blockSize = opt.BlockSize * 1024 * 1024 @@ -372,7 +377,9 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { // Validate endpoint if opt.Endpoint == "" { - log.Warn("ParseAndValidateConfig : account endpoint not provided, assuming the default .core.windows.net style endpoint") + log.Warn( + "ParseAndValidateConfig : account endpoint not provided, assuming the default .core.windows.net style endpoint", + ) if az.stConfig.authConfig.AccountType == EAccountType.BLOCK() { opt.Endpoint = fmt.Sprintf("%s.blob.core.windows.net", opt.AccountName) } else if az.stConfig.authConfig.AccountType == EAccountType.ADLS() { @@ -380,11 +387,20 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { } } az.stConfig.authConfig.Endpoint = opt.Endpoint - az.stConfig.authConfig.Endpoint = formatEndpointProtocol(az.stConfig.authConfig.Endpoint, opt.UseHTTP) - az.stConfig.authConfig.Endpoint = formatEndpointAccountType(az.stConfig.authConfig.Endpoint, az.stConfig.authConfig.AccountType) + az.stConfig.authConfig.Endpoint = formatEndpointProtocol( + az.stConfig.authConfig.Endpoint, + opt.UseHTTP, + ) + az.stConfig.authConfig.Endpoint = formatEndpointAccountType( + az.stConfig.authConfig.Endpoint, + az.stConfig.authConfig.AccountType, + ) az.stConfig.authConfig.ActiveDirectoryEndpoint = opt.ActiveDirectoryEndpoint - az.stConfig.authConfig.ActiveDirectoryEndpoint = formatEndpointProtocol(az.stConfig.authConfig.ActiveDirectoryEndpoint, false) + az.stConfig.authConfig.ActiveDirectoryEndpoint = formatEndpointProtocol( + az.stConfig.authConfig.ActiveDirectoryEndpoint, + false, + ) // If subdirectory is mounted, take the prefix path az.stConfig.prefixPath = removeLeadingSlashes(opt.PrefixPath) @@ -416,7 +432,10 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { } } az.stConfig.proxyAddress = formatEndpointProtocol(az.stConfig.proxyAddress, opt.UseHTTP) - log.Info("ParseAndValidateConfig : using the following proxy address from the config file: %s", az.stConfig.proxyAddress) + log.Info( + "ParseAndValidateConfig : using the 
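
Note: formatEndpointProtocol's condition is rewritten by De Morgan's law — !(A || B) is exactly !A && !B — so behaviour is unchanged. A compilable miniature of the scheme handling (the trailing-slash logic is omitted here):

// endpoint_sketch.go — illustrative only.
package main

import (
	"fmt"
	"strings"
)

func ensureScheme(endpoint string, useHTTP bool) string {
	if endpoint == "" {
		return endpoint
	}
	if !strings.HasPrefix(endpoint, "https://") &&
		!strings.HasPrefix(endpoint, "http://") {
		if useHTTP {
			return "http://" + endpoint
		}
		return "https://" + endpoint
	}
	return endpoint
}

func main() {
	fmt.Println(ensureScheme("myacct.blob.core.windows.net", false))
	// https://myacct.blob.core.windows.net
}
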
following proxy address from the config file: %s", + az.stConfig.proxyAddress, + ) err = ParseAndReadDynamicConfig(az, opt, false) if err != nil { @@ -463,9 +482,13 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { az.stConfig.authConfig.ResourceID = opt.ResourceID case EAuthType.SPN(): az.stConfig.authConfig.AuthMode = EAuthType.SPN() - if opt.ClientID == "" || (opt.ClientSecret == "" && opt.OAuthTokenFilePath == "" && opt.WorkloadIdentityToken == "") || opt.TenantID == "" { + if opt.ClientID == "" || + (opt.ClientSecret == "" && opt.OAuthTokenFilePath == "" && opt.WorkloadIdentityToken == "") || + opt.TenantID == "" { //lint:ignore ST1005 ignore - return errors.New("Client ID, Tenant ID or Client Secret, OAuthTokenFilePath, WorkloadIdentityToken not provided") + return errors.New( + "Client ID, Tenant ID or Client Secret, OAuthTokenFilePath, WorkloadIdentityToken not provided", + ) } az.stConfig.authConfig.ClientID = opt.ClientID az.stConfig.authConfig.ClientSecret = opt.ClientSecret @@ -516,7 +539,9 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { log.Warn("unsupported v1 CLI parameter: set-content-type is always true in blobfuse2.") } if config.IsSet(compName + ".ca-cert-file") { - log.Warn("unsupported v1 CLI parameter: ca-cert-file is not supported in blobfuse2. Use the default ca cert path for your environment.") + log.Warn( + "unsupported v1 CLI parameter: ca-cert-file is not supported in blobfuse2. Use the default ca cert path for your environment.", + ) } if config.IsSet(compName + ".debug-libcurl") { log.Warn("unsupported v1 CLI parameter: debug-libcurl is not applicable in blobfuse2.") @@ -530,14 +555,43 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { } } - log.Crit("ParseAndValidateConfig : account %s, container %s, account-type %s, auth %s, prefix %s, endpoint %s, MD5 %v %v, virtual-directory %v, disable-compression %v, CPK %v", - az.stConfig.authConfig.AccountName, az.stConfig.container, az.stConfig.authConfig.AccountType, az.stConfig.authConfig.AuthMode, - az.stConfig.prefixPath, az.stConfig.authConfig.Endpoint, az.stConfig.validateMD5, az.stConfig.updateMD5, az.stConfig.virtualDirectory, az.stConfig.disableCompression, az.stConfig.cpkEnabled) - log.Crit("ParseAndValidateConfig : use-HTTP %t, block-size %d, max-concurrency %d, default-tier %s, fail-unsupported-op %t, mount-all-containers %t", az.stConfig.authConfig.UseHTTP, az.stConfig.blockSize, az.stConfig.maxConcurrency, az.stConfig.defaultTier, az.stConfig.ignoreAccessModifiers, az.stConfig.mountAllContainers) - log.Crit("ParseAndValidateConfig : Retry Config: retry-count %d, max-timeout %d, backoff-time %d, max-delay %d, preserve-acl: %v", - az.stConfig.maxRetries, az.stConfig.maxTimeout, az.stConfig.backoffTime, az.stConfig.maxRetryDelay, az.stConfig.preserveACL) - - log.Crit("ParseAndValidateConfig : Telemetry : %s, honour-ACL %v", az.stConfig.telemetry, az.stConfig.honourACL) + log.Crit( + "ParseAndValidateConfig : account %s, container %s, account-type %s, auth %s, prefix %s, endpoint %s, MD5 %v %v, virtual-directory %v, disable-compression %v, CPK %v", + az.stConfig.authConfig.AccountName, + az.stConfig.container, + az.stConfig.authConfig.AccountType, + az.stConfig.authConfig.AuthMode, + az.stConfig.prefixPath, + az.stConfig.authConfig.Endpoint, + az.stConfig.validateMD5, + az.stConfig.updateMD5, + az.stConfig.virtualDirectory, + az.stConfig.disableCompression, + az.stConfig.cpkEnabled, + ) + log.Crit( + "ParseAndValidateConfig : 
use-HTTP %t, block-size %d, max-concurrency %d, default-tier %s, fail-unsupported-op %t, mount-all-containers %t", + az.stConfig.authConfig.UseHTTP, + az.stConfig.blockSize, + az.stConfig.maxConcurrency, + az.stConfig.defaultTier, + az.stConfig.ignoreAccessModifiers, + az.stConfig.mountAllContainers, + ) + log.Crit( + "ParseAndValidateConfig : Retry Config: retry-count %d, max-timeout %d, backoff-time %d, max-delay %d, preserve-acl: %v", + az.stConfig.maxRetries, + az.stConfig.maxTimeout, + az.stConfig.backoffTime, + az.stConfig.maxRetryDelay, + az.stConfig.preserveACL, + ) + + log.Crit( + "ParseAndValidateConfig : Telemetry : %s, honour-ACL %v", + az.stConfig.telemetry, + az.stConfig.honourACL, + ) return nil } diff --git a/component/azstorage/connection.go b/component/azstorage/connection.go index 8c97a736ce..5abc8c960d 100644 --- a/component/azstorage/connection.go +++ b/component/azstorage/connection.go @@ -120,8 +120,8 @@ type AzConnection interface { List(prefix string, marker *string, count int32) ([]*internal.ObjAttr, *string, error) ReadToFile(name string, offset int64, count int64, fi *os.File) error - ReadBuffer(name string, offset int64, len int64) ([]byte, error) - ReadInBuffer(name string, offset int64, len int64, data []byte, etag *string) error + ReadBuffer(name string, offset int64, length int64) ([]byte, error) + ReadInBuffer(name string, offset int64, length int64, data []byte, etag *string) error WriteFromFile(name string, metadata map[string]*string, fi *os.File) error WriteFromBuffer(name string, metadata map[string]*string, data []byte) error diff --git a/component/azstorage/datalake.go b/component/azstorage/datalake.go index 88ceae7e66..4db6cbcba8 100644 --- a/component/azstorage/datalake.go +++ b/component/azstorage/datalake.go @@ -77,7 +77,7 @@ var _ AzConnection = &Datalake{} // This is also a known problem with the SDKs. func transformAccountEndpoint(potentialDfsEndpoint string) string { if strings.Contains(potentialDfsEndpoint, ".dfs.") { - return strings.Replace(potentialDfsEndpoint, ".dfs.", ".blob.", -1) + return strings.ReplaceAll(potentialDfsEndpoint, ".dfs.", ".blob.") } else { // Should we just throw here? log.Warn("Datalake::transformAccountEndpoint : Detected use of a custom endpoint. Not all operations are guaranteed to work.") @@ -128,7 +128,10 @@ func (dl *Datalake) UpdateServiceClient(key, value string) (err error) { // get the service client with updated SAS svcClient, err := dl.Auth.getServiceClient(&dl.Config) if err != nil { - log.Err("Datalake::UpdateServiceClient : Failed to get service client [%s]", err.Error()) + log.Err( + "Datalake::UpdateServiceClient : Failed to get service client [%s]", + err.Error(), + ) return err } @@ -200,7 +203,10 @@ func (dl *Datalake) TestPipeline() error { // we are just validating the auth mode used. 
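
Note: the transformAccountEndpoint hunk below swaps strings.Replace(s, old, new, -1) for strings.ReplaceAll(s, old, new); the standard library defines the two as equivalent whenever n < 0:

// replaceall_sketch.go — illustrative only.
package main

import (
	"fmt"
	"strings"
)

func main() {
	dfs := "https://myacct.dfs.core.windows.net"
	a := strings.Replace(dfs, ".dfs.", ".blob.", -1)
	b := strings.ReplaceAll(dfs, ".dfs.", ".blob.")
	fmt.Println(a == b, b) // true https://myacct.blob.core.windows.net
}
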
So, no need to iterate over the pages resp, err := listPathPager.NextPage(context.Background()) if err != nil { - log.Err("Datalake::TestPipeline : Failed to validate account with given auth %s", err.Error()) + log.Err( + "Datalake::TestPipeline : Failed to validate account with given auth %s", + err.Error(), + ) var respErr *azcore.ResponseError errors.As(err, &respErr) if respErr != nil { @@ -248,7 +254,11 @@ func (dl *Datalake) CreateFile(name string, mode os.FileMode) error { } err = dl.ChangeMod(name, mode) if err != nil { - log.Err("Datalake::CreateFile : Failed to set permissions on file %s [%s]", name, err.Error()) + log.Err( + "Datalake::CreateFile : Failed to set permissions on file %s [%s]", + name, + err.Error(), + ) return err } @@ -271,14 +281,27 @@ func (dl *Datalake) CreateDirectory(name string) error { if err != nil { serr := storeDatalakeErrToErr(err) - if serr == InvalidPermission { - log.Err("Datalake::CreateDirectory : Insufficient permissions for %s [%s]", name, err.Error()) + switch serr { + case InvalidPermission: + log.Err( + "Datalake::CreateDirectory : Insufficient permissions for %s [%s]", + name, + err.Error(), + ) return syscall.EACCES - } else if serr == ErrFileAlreadyExists { - log.Err("Datalake::CreateDirectory : Path already exists for %s [%s]", name, err.Error()) + case ErrFileAlreadyExists: + log.Err( + "Datalake::CreateDirectory : Path already exists for %s [%s]", + name, + err.Error(), + ) return syscall.EEXIST - } else { - log.Err("Datalake::CreateDirectory : Failed to create directory %s [%s]", name, err.Error()) + default: + log.Err( + "Datalake::CreateDirectory : Failed to create directory %s [%s]", + name, + err.Error(), + ) return err } } @@ -299,16 +322,21 @@ func (dl *Datalake) DeleteFile(name string) (err error) { _, err = fileClient.Delete(context.Background(), nil) if err != nil { serr := storeDatalakeErrToErr(err) - if serr == ErrFileNotFound { + switch serr { + case ErrFileNotFound: log.Err("Datalake::DeleteFile : %s does not exist", name) return syscall.ENOENT - } else if serr == BlobIsUnderLease { + case BlobIsUnderLease: log.Err("Datalake::DeleteFile : %s is under lease [%s]", name, err.Error()) return syscall.EIO - } else if serr == InvalidPermission { - log.Err("Datalake::DeleteFile : Insufficient permissions for %s [%s]", name, err.Error()) + case InvalidPermission: + log.Err( + "Datalake::DeleteFile : Insufficient permissions for %s [%s]", + name, + err.Error(), + ) return syscall.EACCES - } else { + default: log.Err("Datalake::DeleteFile : Failed to delete file %s [%s]", name, err.Error()) return err } @@ -344,11 +372,17 @@ func (dl *Datalake) DeleteDirectory(name string) (err error) { func (dl *Datalake) RenameFile(source string, target string, srcAttr *internal.ObjAttr) error { log.Trace("Datalake::RenameFile : %s -> %s", source, target) - fileClient := dl.Filesystem.NewFileClient(url.PathEscape(filepath.Join(dl.Config.prefixPath, source))) + fileClient := dl.Filesystem.NewFileClient( + url.PathEscape(filepath.Join(dl.Config.prefixPath, source)), + ) - renameResponse, err := fileClient.Rename(context.Background(), filepath.Join(dl.Config.prefixPath, target), &file.RenameOptions{ - CPKInfo: dl.datalakeCPKOpt, - }) + renameResponse, err := fileClient.Rename( + context.Background(), + filepath.Join(dl.Config.prefixPath, target), + &file.RenameOptions{ + CPKInfo: dl.datalakeCPKOpt, + }, + ) if err != nil { serr := storeDatalakeErrToErr(err) if serr == ErrFileNotFound { @@ -367,10 +401,16 @@ func (dl *Datalake) RenameFile(source 
string, target string, srcAttr *internal.O func (dl *Datalake) RenameDirectory(source string, target string) error { log.Trace("Datalake::RenameDirectory : %s -> %s", source, target) - directoryClient := dl.Filesystem.NewDirectoryClient(url.PathEscape(filepath.Join(dl.Config.prefixPath, source))) - _, err := directoryClient.Rename(context.Background(), filepath.Join(dl.Config.prefixPath, target), &directory.RenameOptions{ - CPKInfo: dl.datalakeCPKOpt, - }) + directoryClient := dl.Filesystem.NewDirectoryClient( + url.PathEscape(filepath.Join(dl.Config.prefixPath, source)), + ) + _, err := directoryClient.Rename( + context.Background(), + filepath.Join(dl.Config.prefixPath, target), + &directory.RenameOptions{ + CPKInfo: dl.datalakeCPKOpt, + }, + ) if err != nil { serr := storeDatalakeErrToErr(err) if serr == ErrFileNotFound { @@ -395,13 +435,18 @@ func (dl *Datalake) GetAttr(name string) (blobAttr *internal.ObjAttr, err error) }) if err != nil { e := storeDatalakeErrToErr(err) - if e == ErrFileNotFound { + switch e { + case ErrFileNotFound: return blobAttr, syscall.ENOENT - } else if e == InvalidPermission { + case InvalidPermission: log.Err("Datalake::GetAttr : Insufficient permissions for %s [%s]", name, err.Error()) return blobAttr, syscall.EACCES - } else { - log.Err("Datalake::GetAttr : Failed to get path properties for %s [%s]", name, err.Error()) + default: + log.Err( + "Datalake::GetAttr : Failed to get path properties for %s [%s]", + name, + err.Error(), + ) return blobAttr, err } } @@ -463,7 +508,11 @@ func (dl *Datalake) GetAttr(name string) (blobAttr *internal.ObjAttr, err error) // List : Get a list of path matching the given prefix // This fetches the list using a marker so the caller code should handle marker logic // If count=0 - fetch max entries -func (dl *Datalake) List(prefix string, marker *string, count int32) ([]*internal.ObjAttr, *string, error) { +func (dl *Datalake) List( + prefix string, + marker *string, + count int32, +) ([]*internal.ObjAttr, *string, error) { return dl.BlockBlob.List(prefix, marker, count) } @@ -473,21 +522,31 @@ func (dl *Datalake) ReadToFile(name string, offset int64, count int64, fi *os.Fi } // ReadBuffer : Download a specific range from a file to a buffer -func (dl *Datalake) ReadBuffer(name string, offset int64, len int64) ([]byte, error) { - return dl.BlockBlob.ReadBuffer(name, offset, len) +func (dl *Datalake) ReadBuffer(name string, offset int64, length int64) ([]byte, error) { + return dl.BlockBlob.ReadBuffer(name, offset, length) } // ReadInBuffer : Download specific range from a file to a user provided buffer -func (dl *Datalake) ReadInBuffer(name string, offset int64, len int64, data []byte, etag *string) error { - return dl.BlockBlob.ReadInBuffer(name, offset, len, data, etag) +func (dl *Datalake) ReadInBuffer( + name string, + offset int64, + length int64, + data []byte, + etag *string, +) error { + return dl.BlockBlob.ReadInBuffer(name, offset, length, data, etag) } // WriteFromFile : Upload local file to file -func (dl *Datalake) WriteFromFile(name string, metadata map[string]*string, fi *os.File) (err error) { +func (dl *Datalake) WriteFromFile( + name string, + metadata map[string]*string, + fi *os.File, +) (err error) { // File in DataLake may have permissions and ACL set. Just uploading the file will override them. // So, we need to get the existing permissions and ACL and set them back after uploading the file. 
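
Note: the next hunk drops a redundant explicit type from a declaration. The three spellings below are equivalent — each yields a zero-valued string:

// vardecl_sketch.go — illustrative only.
package main

import "fmt"

func main() {
	var a string = "" // explicit type and value: verbose
	var b = ""        // type inferred from the literal
	var c string      // zero value is already ""
	fmt.Println(a == b, b == c) // true true
}
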
- var acl string = "" + var acl = "" var fileClient *file.Client = nil if dl.Config.preserveACL { @@ -567,13 +626,19 @@ func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error { Permissions: &newPerm, }) if err != nil { - log.Err("Datalake::ChangeMod : Failed to change mode of file %s to %s [%s]", name, mode, err.Error()) + log.Err( + "Datalake::ChangeMod : Failed to change mode of file %s to %s [%s]", + name, + mode, + err.Error(), + ) e := storeDatalakeErrToErr(err) - if e == ErrFileNotFound { + switch e { + case ErrFileNotFound: return syscall.ENOENT - } else if e == InvalidPermission { + case InvalidPermission: return syscall.EACCES - } else { + default: return err } } diff --git a/component/azstorage/datalake_test.go b/component/azstorage/datalake_test.go index 29e929c78e..49ead8de7c 100644 --- a/component/azstorage/datalake_test.go +++ b/component/azstorage/datalake_test.go @@ -259,8 +259,8 @@ func (s *datalakeTestSuite) TestFNSOverHNS() { // Testing dir and dir/ s.tearDownTestHelper(false) // Don't delete the generated container. config := fmt.Sprintf("azstorage:\n account-name: %s\n type: adls\n account-key: %s\n mode: key\n container: %s\n ", - storageTestConfigurationParameters.BlockAccount, storageTestConfigurationParameters.BlockKey, "fnsoverhns") - s.setupTestHelper(config, "fnsoverhns", true) + storageTestConfigurationParameters.BlockAccount, storageTestConfigurationParameters.BlockKey, s.container) + s.setupTestHelper(config, s.container, true) var paths = []string{generateDirectoryName(), generateDirectoryName() + "/"} for _, path := range paths { diff --git a/component/azstorage/utils.go b/component/azstorage/utils.go index f2c24925a2..b356dd6007 100644 --- a/component/azstorage/utils.go +++ b/component/azstorage/utils.go @@ -85,9 +85,13 @@ const ( // getAzStorageClientOptions : Create client options based on the config func getAzStorageClientOptions(conf *AzStorageConfig) (azcore.ClientOptions, error) { retryOptions := policy.RetryOptions{ - MaxRetries: conf.maxRetries, // Try at most 3 times to perform the operation (set to 1 to disable retries) - TryTimeout: time.Second * time.Duration(conf.maxTimeout), // Maximum time allowed for any single try - RetryDelay: time.Second * time.Duration(conf.backoffTime), // Backoff amount for each retry (exponential or linear) + MaxRetries: conf.maxRetries, // Try at most 3 times to perform the operation (set to 1 to disable retries) + TryTimeout: time.Second * time.Duration( + conf.maxTimeout, + ), // Maximum time allowed for any single try + RetryDelay: time.Second * time.Duration( + conf.backoffTime, + ), // Backoff amount for each retry (exponential or linear) MaxRetryDelay: time.Second * time.Duration(conf.maxRetryDelay), // Max delay between retries } @@ -102,7 +106,10 @@ func getAzStorageClientOptions(conf *AzStorageConfig) (azcore.ClientOptions, err transportOptions, err := newBlobfuse2HttpClient(conf) if err != nil { - log.Err("utils::getAzStorageClientOptions : Failed to create transport client [%s]", err.Error()) + log.Err( + "utils::getAzStorageClientOptions : Failed to create transport client [%s]", + err.Error(), + ) } perCallPolicies := []policy.Policy{telemetryPolicy} @@ -160,7 +167,8 @@ func getSDKLogOptions() policy.LogOptions { // - logging type is silent // - logging level is less than debug func setSDKLogListener() { - if os.Getenv("BLOBFUSE_DISABLE_SDK_LOG") == "true" || log.GetType() == "silent" || log.GetLogLevel() < common.ELogLevel.LOG_DEBUG() { + if os.Getenv("BLOBFUSE_DISABLE_SDK_LOG") == "true" 
|| log.GetType() == "silent" || + log.GetLogLevel() < common.ELogLevel.LOG_DEBUG() { // reset listener azlog.SetListener(nil) } else { @@ -505,8 +513,16 @@ func getFileMode(permissions string) (os.FileMode, error) { // Expect service to return a 9 char string with r, w, x, or - const rwx = "rwxrwxrwx" if len(rwx) > len(permissions) { - log.Err("utils::getFileMode : Unexpected length of permissions from the service %d: %s", len(permissions), permissions) - return 0, fmt.Errorf("unexpected length of permissions from the service %d: %s", len(permissions), permissions) + log.Err( + "utils::getFileMode : Unexpected length of permissions from the service %d: %s", + len(permissions), + permissions, + ) + return 0, fmt.Errorf( + "unexpected length of permissions from the service %d: %s", + len(permissions), + permissions, + ) } else if len(rwx) < len(permissions) { log.Debug("utils::getFileMode : Unexpected permissions from the service: %s", permissions) } diff --git a/component/block_cache/block_cache.go b/component/block_cache/block_cache.go index df1da5ac5e..54f3c2a410 100755 --- a/component/block_cache/block_cache.go +++ b/component/block_cache/block_cache.go @@ -92,15 +92,15 @@ type BlockCache struct { // Structure defining your config parameters type BlockCacheOptions struct { - BlockSize float64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` - MemSize uint64 `config:"mem-size-mb" yaml:"mem-size-mb,omitempty"` - TmpPath string `config:"path" yaml:"path,omitempty"` - DiskSize uint64 `config:"disk-size-mb" yaml:"disk-size-mb,omitempty"` + BlockSize float64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` + MemSize uint64 `config:"mem-size-mb" yaml:"mem-size-mb,omitempty"` + TmpPath string `config:"path" yaml:"path,omitempty"` + DiskSize uint64 `config:"disk-size-mb" yaml:"disk-size-mb,omitempty"` DiskTimeout uint32 `config:"disk-timeout-sec" yaml:"timeout-sec,omitempty"` - PrefetchCount uint32 `config:"prefetch" yaml:"prefetch,omitempty"` - Workers uint32 `config:"parallelism" yaml:"parallelism,omitempty"` + PrefetchCount uint32 `config:"prefetch" yaml:"prefetch,omitempty"` + Workers uint32 `config:"parallelism" yaml:"parallelism,omitempty"` PrefetchOnOpen bool `config:"prefetch-on-open" yaml:"prefetch-on-open,omitempty"` - Consistency bool `config:"consistency" yaml:"consistency,omitempty"` + Consistency bool `config:"consistency" yaml:"consistency,omitempty"` CleanupOnStart bool `config:"cleanup-on-start" yaml:"cleanup-on-start,omitempty"` } @@ -206,7 +206,7 @@ func (bc *BlockCache) GenConfig() string { sb.WriteString(fmt.Sprintf("\n prefetch: %v", prefetch)) sb.WriteString(fmt.Sprintf("\n parallelism: %v", uint32(3*runtime.NumCPU()))) - var tmpPath string = "" + var tmpPath = "" _ = config.UnmarshalKey("tmp-path", &tmpPath) if tmpPath != "" { sb.WriteString(fmt.Sprintf("\n path: %v", tmpPath)) @@ -259,7 +259,8 @@ func (bc *BlockCache) Configure(_ bool) error { bc.prefetch = uint32(math.Max((MIN_PREFETCH*2)+1, (float64)(2*runtime.NumCPU()))) bc.noPrefetch = false - if (!config.IsSet(compName + ".mem-size-mb")) && (uint64(bc.prefetch)*uint64(bc.blockSize)) > bc.memSize { + if (!config.IsSet(compName + ".mem-size-mb")) && + (uint64(bc.prefetch)*uint64(bc.blockSize)) > bc.memSize { bc.prefetch = (MIN_PREFETCH * 2) + 1 } @@ -298,16 +299,24 @@ func (bc *BlockCache) Configure(_ bool) error { if bc.mntPath == bc.tmpPath { log.Err("BlockCache: config error [tmp-path is same as mount path]") - return fmt.Errorf("config error in %s error [tmp-path is same as mount path]", bc.Name()) + 
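
Note: getFileMode above decodes the service's nine-character rwx string into an os.FileMode, one bit per non-dash position. A standalone version of the same idea — slightly stricter than the real function, which tolerates over-long strings with only a debug log:

// filemode_sketch.go — illustrative only.
package main

import (
	"fmt"
	"os"
)

func parseMode(perms string) (os.FileMode, error) {
	const rwx = "rwxrwxrwx"
	if len(perms) != len(rwx) {
		return 0, fmt.Errorf("unexpected permissions length %d: %s", len(perms), perms)
	}
	var mode os.FileMode
	for i, c := range perms {
		if c == '-' {
			continue
		}
		if byte(c) != rwx[i] {
			return 0, fmt.Errorf("unexpected permission char %q at %d", c, i)
		}
		mode |= 1 << uint(8-i) // bit 8 is owner-read, bit 0 is other-execute
	}
	return mode, nil
}

func main() {
	m, _ := parseMode("rw-r--r--")
	fmt.Printf("%o\n", m) // 644
}
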
return fmt.Errorf( + "config error in %s error [tmp-path is same as mount path]", + bc.Name(), + ) } // Extract values from 'conf' and store them as you wish here _, err = os.Stat(bc.tmpPath) if os.IsNotExist(err) { - log.Info("BlockCache: config error [tmp-path does not exist. attempting to create tmp-path.]") + log.Info( + "BlockCache: config error [tmp-path does not exist. attempting to create tmp-path.]", + ) err := os.Mkdir(bc.tmpPath, os.FileMode(0755)) if err != nil { - log.Err("BlockCache: config error creating directory of temp path after clean [%s]", err.Error()) + log.Err( + "BlockCache: config error creating directory of temp path after clean [%s]", + err.Error(), + ) return fmt.Errorf("config error in %s [%s]", bc.Name(), err.Error()) } } @@ -324,20 +333,44 @@ func (bc *BlockCache) Configure(_ bool) error { } if (uint64(bc.prefetch) * uint64(bc.blockSize)) > bc.memSize { - log.Err("BlockCache::Configure : config error [memory limit too low for configured prefetch]") - return fmt.Errorf("config error in %s [memory limit too low for configured prefetch]", bc.Name()) + log.Err( + "BlockCache::Configure : config error [memory limit too low for configured prefetch]", + ) + return fmt.Errorf( + "config error in %s [memory limit too low for configured prefetch]", + bc.Name(), + ) } if bc.tmpPath != "" { - bc.diskPolicy, err = tlru.New(uint32((bc.diskSize)/bc.blockSize), bc.diskTimeout, bc.diskEvict, 60, bc.checkDiskUsage) + bc.diskPolicy, err = tlru.New( + uint32((bc.diskSize)/bc.blockSize), + bc.diskTimeout, + bc.diskEvict, + 60, + bc.checkDiskUsage, + ) if err != nil { log.Err("BlockCache::Configure : fail to create LRU for memory nodes [%s]", err.Error()) return fmt.Errorf("config error in %s [%s]", bc.Name(), err.Error()) } } - log.Crit("BlockCache::Configure : block size %v, mem size %v, worker %v, prefetch %v, disk path %v, max size %v, disk timeout %v, prefetch-on-open %t, maxDiskUsageHit %v, noPrefetch %v, consistency %v, cleanup-on-start %t", - bc.blockSize, bc.memSize, bc.workers, bc.prefetch, bc.tmpPath, bc.diskSize, bc.diskTimeout, bc.prefetchOnOpen, bc.maxDiskUsageHit, bc.noPrefetch, bc.consistency, conf.CleanupOnStart) + log.Crit( + "BlockCache::Configure : block size %v, mem size %v, worker %v, prefetch %v, disk path %v, max size %v, disk timeout %v, prefetch-on-open %t, maxDiskUsageHit %v, noPrefetch %v, consistency %v, cleanup-on-start %t", + bc.blockSize, + bc.memSize, + bc.workers, + bc.prefetch, + bc.tmpPath, + bc.diskSize, + bc.diskTimeout, + bc.prefetchOnOpen, + bc.maxDiskUsageHit, + bc.noPrefetch, + bc.consistency, + conf.CleanupOnStart, + ) return nil } @@ -346,7 +379,11 @@ func (bc *BlockCache) getDefaultDiskSize(path string) uint64 { var stat syscall.Statfs_t err := syscall.Statfs(path, &stat) if err != nil { - log.Info("BlockCache::getDefaultDiskSize : config error %s [%s]. Assigning a default value of 4GB or if any value is assigned to .disk-size-mb in config.", bc.Name(), err.Error()) + log.Info( + "BlockCache::getDefaultDiskSize : config error %s [%s]. Assigning a default value of 4GB or if any value is assigned to .disk-size-mb in config.", + bc.Name(), + err.Error(), + ) return uint64(4192) * _1MB } @@ -358,7 +395,11 @@ func (bc *BlockCache) getDefaultMemSize() uint64 { err := syscall.Sysinfo(&sysinfo) if err != nil { - log.Info("BlockCache::getDefaultMemSize : config error %s [%s]. Assigning a pre-defined value of 4GB.", bc.Name(), err.Error()) + log.Info( + "BlockCache::getDefaultMemSize : config error %s [%s]. 
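
Note: two sizing rules fall out of the Configure hunks: the prefetch window times the block size must fit within the memory budget, and the disk LRU capacity is simply disk-size divided by block-size (what the tlru.New call above computes). Reduced to plain arithmetic:

// cachebudget_sketch.go — illustrative only.
package main

import (
	"errors"
	"fmt"
)

const mb = 1024 * 1024

func validate(blockSize, memSize, diskSize, prefetch uint64) (diskEntries uint64, err error) {
	if prefetch*blockSize > memSize {
		return 0, errors.New("memory limit too low for configured prefetch")
	}
	return diskSize / blockSize, nil
}

func main() {
	entries, err := validate(16*mb, 512*mb, 4096*mb, 11)
	fmt.Println(entries, err) // 256 <nil>
}
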
Assigning a pre-defined value of 4GB.", + bc.Name(), + err.Error(), + ) return uint64(4192) * _1MB } @@ -388,7 +429,12 @@ func (bc *BlockCache) CreateFile(options internal.CreateFileOptions) (*handlemap // OpenFile: Create a handle for the file user has requested to open func (bc *BlockCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Handle, error) { - log.Trace("BlockCache::OpenFile : name=%s, flags=%X, mode=%s", options.Name, options.Flags, options.Mode) + log.Trace( + "BlockCache::OpenFile : name=%s, flags=%X, mode=%s", + options.Name, + options.Flags, + options.Mode, + ) attr, err := bc.NextComponent().GetAttr(internal.GetAttrOptions{Name: options.Name}) if err != nil { @@ -446,14 +492,24 @@ func (bc *BlockCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Han // This method is only called when the file is opened in O_RDWR mode. // Each Block's size must equal to blockSize set in config and last block size <= config's blockSize // returns true, if blockList is valid -func (bc *BlockCache) validateBlockList(handle *handlemap.Handle, options internal.OpenFileOptions, blockList *internal.CommittedBlockList) bool { +func (bc *BlockCache) validateBlockList( + handle *handlemap.Handle, + options internal.OpenFileOptions, + blockList *internal.CommittedBlockList, +) bool { lst, _ := handle.GetValue("blockList") listMap := lst.(map[int64]*blockInfo) listLen := len(*blockList) for idx, block := range *blockList { - if (idx < (listLen-1) && block.Size != bc.blockSize) || (idx == (listLen-1) && block.Size > bc.blockSize) { - log.Err("BlockCache::validateBlockList : Block size mismatch for %s [block: %v, size: %v]", options.Name, block.Id, block.Size) + if (idx < (listLen-1) && block.Size != bc.blockSize) || + (idx == (listLen-1) && block.Size > bc.blockSize) { + log.Err( + "BlockCache::validateBlockList : Block size mismatch for %s [block: %v, size: %v]", + options.Name, + block.Id, + block.Size, + ) return false } listMap[int64(idx)] = &blockInfo{ @@ -488,7 +544,11 @@ func (bc *BlockCache) FlushFile(options internal.FlushFileOptions) error { if bc.lazyWrite && !options.CloseInProgress { // As lazy-write is enable, upload will be scheduled when file is closed. - log.Info("BlockCache::FlushFile : %s will be flushed when handle %d is closed", options.Handle.Path, options.Handle.ID) + log.Info( + "BlockCache::FlushFile : %s will be flushed when handle %d is closed", + options.Handle.Path, + options.Handle.ID, + ) return nil } @@ -499,7 +559,11 @@ func (bc *BlockCache) FlushFile(options internal.FlushFileOptions) error { if options.Handle.Dirty() { err := bc.commitBlocks(options.Handle) if err != nil { - log.Err("BlockCache::FlushFile : Failed to commit blocks for %s [%s]", options.Handle.Path, err.Error()) + log.Err( + "BlockCache::FlushFile : Failed to commit blocks for %s [%s]", + options.Handle.Path, + err.Error(), + ) return err } } @@ -527,8 +591,14 @@ func (bc *BlockCache) closeFileInternal(options internal.CloseFileOptions) error defer bc.fileCloseOpt.Done() if options.Handle.Dirty() { - log.Info("BlockCache::CloseFile : name=%s, handle=%d dirty. Flushing the file.", options.Handle.Path, options.Handle.ID) - err := bc.FlushFile(internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true}) //nolint + log.Info( + "BlockCache::CloseFile : name=%s, handle=%d dirty. 
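
Note: validateBlockList below enforces the invariant that makes O_RDWR updates through the block cache safe: every committed block except the last must equal the configured block size exactly, and the last may only be smaller, never larger. As a predicate:

// blocklist_sketch.go — illustrative only.
package main

import "fmt"

func validBlockList(sizes []uint64, blockSize uint64) bool {
	for i, s := range sizes {
		last := i == len(sizes)-1
		if (!last && s != blockSize) || (last && s > blockSize) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validBlockList([]uint64{8, 8, 5}, 8)) // true
	fmt.Println(validBlockList([]uint64{8, 5, 8}, 8)) // false: short block mid-list
}
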
Flushing the file.", + options.Handle.Path, + options.Handle.ID, + ) + err := bc.FlushFile( + internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true}, + ) //nolint if err != nil { log.Err("BlockCache::CloseFile : failed to flush file %s", options.Handle.Path) return err @@ -591,7 +661,13 @@ func (bc *BlockCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, block, err := bc.getBlock(options.Handle, uint64(options.Offset)) if err != nil { if err != io.EOF { - log.Err("BlockCache::ReadInBuffer : Failed to get Block %v=>%s offset %v [%v]", options.Handle.ID, options.Handle.Path, options.Offset, err.Error()) + log.Err( + "BlockCache::ReadInBuffer : Failed to get Block %v=>%s offset %v [%v]", + options.Handle.ID, + options.Handle.Path, + options.Offset, + err.Error(), + ) } return dataRead, err } @@ -663,10 +739,20 @@ func (bc *BlockCache) getBlock(handle *handlemap.Handle, readoffset uint64) (*Bl shouldCommit, shouldDownload := shouldCommitAndDownload(int64(index), handle) if shouldCommit { // commit all the uncommitted blocks to storage - log.Debug("BlockCache::getBlock : Downloading an uncommitted block %v, so committing all the staged blocks for %v=>%s", index, handle.ID, handle.Path) + log.Debug( + "BlockCache::getBlock : Downloading an uncommitted block %v, so committing all the staged blocks for %v=>%s", + index, + handle.ID, + handle.Path, + ) err := bc.commitBlocks(handle) if err != nil { - log.Err("BlockCache::getBlock : Failed to commit blocks for %v=>%s [%s]", handle.ID, handle.Path, err.Error()) + log.Err( + "BlockCache::getBlock : Failed to commit blocks for %v=>%s [%s]", + handle.ID, + handle.Path, + err.Error(), + ) return nil, err } } else if !shouldCommit && !shouldDownload { @@ -695,12 +781,25 @@ func (bc *BlockCache) getBlock(handle *handlemap.Handle, readoffset uint64) (*Bl // If this is the first read request then prefetch all required nodes val, _ := handle.GetValue("#") if !bc.noPrefetch && val.(uint64) == 0 { - log.Debug("BlockCache::getBlock : Starting the prefetch %v=>%s (offset %v, index %v)", handle.ID, handle.Path, readoffset, index) + log.Debug( + "BlockCache::getBlock : Starting the prefetch %v=>%s (offset %v, index %v)", + handle.ID, + handle.Path, + readoffset, + index, + ) // This is the first read for this file handle so start prefetching all the nodes err := bc.startPrefetch(handle, index, false) if err != nil && err != io.EOF { - log.Err("BlockCache::getBlock : Unable to start prefetch %v=>%s (offset %v, index %v) [%s]", handle.ID, handle.Path, readoffset, index, err.Error()) + log.Err( + "BlockCache::getBlock : Unable to start prefetch %v=>%s (offset %v, index %v) [%s]", + handle.ID, + handle.Path, + readoffset, + index, + err.Error(), + ) return nil, err } } else { @@ -720,8 +819,14 @@ func (bc *BlockCache) getBlock(handle *handlemap.Handle, readoffset uint64) (*Bl // This node was not found so above logic should have queued it up, retry searching now node, found = handle.GetValue(fmt.Sprintf("%v", index)) if !found { - log.Err("BlockCache::getBlock : Failed to get the required block %v=>%s (offset %v, index %v)", handle.ID, handle.Path, readoffset, index) - return nil, fmt.Errorf("not able to find block immediately after scheudling") + log.Err( + "BlockCache::getBlock : Failed to get the required block %v=>%s (offset %v, index %v)", + handle.ID, + handle.Path, + readoffset, + index, + ) + return nil, fmt.Errorf("not able to find block immediately after scheduling") } } @@ -736,7 +841,13 @@ func (bc *BlockCache) 
getBlock(handle *handlemap.Handle, readoffset uint64) (*Bl switch t { case BlockStatusDownloaded: - log.Debug("BlockCache::getBlock : Downloaded block %v for %v=>%s (read offset %v)", index, handle.ID, handle.Path, readoffset) + log.Debug( + "BlockCache::getBlock : Downloaded block %v for %v=>%s (read offset %v)", + index, + handle.ID, + handle.Path, + readoffset, + ) block.flags.Clear(BlockFlagDownloading) @@ -757,11 +868,23 @@ func (bc *BlockCache) getBlock(handle *handlemap.Handle, readoffset uint64) (*Bl block.flags.Set(BlockFlagSynced) case BlockStatusUploaded: - log.Debug("BlockCache::getBlock : Staged block %v for %v=>%s (read offset %v)", index, handle.ID, handle.Path, readoffset) + log.Debug( + "BlockCache::getBlock : Staged block %v for %v=>%s (read offset %v)", + index, + handle.ID, + handle.Path, + readoffset, + ) block.flags.Clear(BlockFlagUploading) case BlockStatusDownloadFailed: - log.Err("BlockCache::getBlock : Failed to download block %v for %v=>%s (read offset %v)", index, handle.ID, handle.Path, readoffset) + log.Err( + "BlockCache::getBlock : Failed to download block %v for %v=>%s (read offset %v)", + index, + handle.ID, + handle.Path, + readoffset, + ) // Remove this node from handle so that next read retries to download the block again bc.releaseDownloadFailedBlock(handle, block) @@ -769,7 +892,13 @@ func (bc *BlockCache) getBlock(handle *handlemap.Handle, readoffset uint64) (*Bl case BlockStatusUploadFailed: // Local data is still valid so continue using this buffer - log.Err("BlockCache::getBlock : Failed to upload block %v for %v=>%s (read offset %v)", index, handle.ID, handle.Path, readoffset) + log.Err( + "BlockCache::getBlock : Failed to upload block %v for %v=>%s (read offset %v)", + index, + handle.ID, + handle.Path, + readoffset, + ) block.flags.Clear(BlockFlagUploading) // Move this block to end of queue as this is still modified and un-staged @@ -796,7 +925,12 @@ func (bc *BlockCache) startPrefetch(handle *handlemap.Handle, index uint64, pref if currentCnt > MIN_PREFETCH { // As this file is in random read mode now, release the excess buffers. 
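
Note: getBlock's switch consumes a one-shot status that the worker publishes on the block's channel once the download or stage finishes; the first reader blocks on the channel and then branches on the outcome. The shape of that hand-off, with illustrative names only:

// blockstate_sketch.go — illustrative only.
package main

import "fmt"

type status int

const (
	statusDownloaded status = iota
	statusDownloadFailed
)

type block struct {
	state chan status
}

func main() {
	b := block{state: make(chan status, 1)}

	// Worker side: finish the download and publish the outcome once.
	go func() { b.state <- statusDownloaded }()

	// First-reader side: block until the worker reports, then branch.
	switch <-b.state {
	case statusDownloaded:
		fmt.Println("use buffer")
	case statusDownloadFailed:
		fmt.Println("drop block and retry")
	}
}
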
Just keep 5 buffers for it to work - log.Info("BlockCache::startPrefetch : Cleanup excessive blocks %v=>%s index %v", handle.ID, handle.Path, index) + log.Info( + "BlockCache::startPrefetch : Cleanup excessive blocks %v=>%s index %v", + handle.ID, + handle.Path, + index, + ) // As this is random read move all in process blocks to free list nodeList := handle.Buffers.Cooking @@ -878,10 +1012,20 @@ func (bc *BlockCache) startPrefetch(handle *handlemap.Handle, index uint64, pref shouldCommit, _ := shouldCommitAndDownload(int64(index), handle) if shouldCommit { // This shall happen only for the first uncommitted block and shall flush all the uncommitted blocks to storage - log.Debug("BlockCache::startPrefetch : Fetching an uncommitted block %v, so committing all the staged blocks for %v=>%s", index, handle.ID, handle.Path) + log.Debug( + "BlockCache::startPrefetch : Fetching an uncommitted block %v, so committing all the staged blocks for %v=>%s", + index, + handle.ID, + handle.Path, + ) err := bc.commitBlocks(handle) if err != nil { - log.Err("BlockCache::startPrefetch : Failed to commit blocks for %v=>%s [%s]", handle.ID, handle.Path, err.Error()) + log.Err( + "BlockCache::startPrefetch : Failed to commit blocks for %v=>%s [%s]", + handle.ID, + handle.Path, + err.Error(), + ) return err } } @@ -900,7 +1044,13 @@ func (bc *BlockCache) startPrefetch(handle *handlemap.Handle, index uint64, pref // refreshBlock: Get a block from the list and prepare it for download func (bc *BlockCache) refreshBlock(handle *handlemap.Handle, index uint64, prefetch bool) error { - log.Trace("BlockCache::refreshBlock : Request to download %v=>%s (index %v, prefetch %v)", handle.ID, handle.Path, index, prefetch) + log.Trace( + "BlockCache::refreshBlock : Request to download %v=>%s (index %v, prefetch %v)", + handle.ID, + handle.Path, + index, + prefetch, + ) // Convert index to offset offset := index * bc.blockSize @@ -915,7 +1065,14 @@ func (bc *BlockCache) refreshBlock(handle *handlemap.Handle, index uint64, prefe // this might happen when all blocks are under download and no first reader is hit for any of them block, err := bc.blockPool.MustGet() if err != nil { - log.Err("BlockCache::refreshBlock : Unable to allocate block %v=>%s (index %v, prefetch %v) %v", handle.ID, handle.Path, index, prefetch, err) + log.Err( + "BlockCache::refreshBlock : Unable to allocate block %v=>%s (index %v, prefetch %v) %v", + handle.ID, + handle.Path, + index, + prefetch, + err, + ) return err } @@ -931,7 +1088,13 @@ func (bc *BlockCache) refreshBlock(handle *handlemap.Handle, index uint64, prefe // If the block is being staged, then wait till it is uploaded // and then use it for read if block.flags.IsSet(BlockFlagUploading) { - log.Debug("BlockCache::refreshBlock : Waiting for the block %v to upload before using it for block %v read for %v=>%s", block.id, index, handle.ID, handle.Path) + log.Debug( + "BlockCache::refreshBlock : Waiting for the block %v to upload before using it for block %v read for %v=>%s", + block.id, + index, + handle.ID, + handle.Path, + ) _, ok := <-block.state if ok { block.Unblock() @@ -1018,7 +1181,7 @@ func (bc *BlockCache) download(item *workItem) { log.Err("BlockCache::download : Failed to open file %s [%s]", fileName, err.Error()) _ = os.Remove(localPath) } else { - var successfulRead bool = true + var successfulRead = true numberOfBytes, err := f.Read(item.block.data) if err != nil { log.Err("BlockCache::download : Failed to read data from disk cache %s [%s]", fileName, err.Error()) @@ -1060,7 
+1223,13 @@ func (bc *BlockCache) download(item *workItem) { if item.failCnt > MAX_FAIL_CNT { // If we failed to read the data 3 times then just give up - log.Err("BlockCache::download : 3 attempts to download a block have failed %v=>%s (index %v, offset %v)", item.handle.ID, item.handle.Path, item.block.id, item.block.offset) + log.Err( + "BlockCache::download : 3 attempts to download a block have failed %v=>%s (index %v, offset %v)", + item.handle.ID, + item.handle.Path, + item.block.id, + item.block.offset, + ) item.block.Failed() item.block.Ready(BlockStatusDownloadFailed) return @@ -1068,7 +1237,13 @@ func (bc *BlockCache) download(item *workItem) { if err != nil && err != io.EOF { // Fail to read the data so just reschedule this request - log.Err("BlockCache::download : Failed to read %v=>%s from offset %v [%s]", item.handle.ID, item.handle.Path, item.block.id, err.Error()) + log.Err( + "BlockCache::download : Failed to read %v=>%s from offset %v [%s]", + item.handle.ID, + item.handle.Path, + item.block.id, + err.Error(), + ) item.failCnt++ bc.threadPool.Schedule(false, item) return @@ -1083,7 +1258,13 @@ func (bc *BlockCache) download(item *workItem) { // Compare the ETAG value and fail download if blob has changed if etag != "" { if item.ETag != "" && item.ETag != etag { - log.Err("BlockCache::download : Blob has changed for %v=>%s (index %v, offset %v)", item.handle.ID, item.handle.Path, item.block.id, item.block.offset) + log.Err( + "BlockCache::download : Blob has changed for %v=>%s (index %v, offset %v)", + item.handle.ID, + item.handle.Path, + item.block.id, + item.block.offset, + ) item.block.Failed() item.block.Ready(BlockStatusDownloadFailed) return @@ -1093,7 +1274,11 @@ func (bc *BlockCache) download(item *workItem) { if bc.tmpPath != "" { err := os.MkdirAll(filepath.Dir(localPath), 0777) if err != nil { - log.Err("BlockCache::download : error creating directory structure for file %s [%s]", localPath, err.Error()) + log.Err( + "BlockCache::download : error creating directory structure for file %s [%s]", + localPath, + err.Error(), + ) return } @@ -1102,7 +1287,11 @@ func (bc *BlockCache) download(item *workItem) { if err == nil { _, err := f.Write(item.block.data[:n]) if err != nil { - log.Err("BlockCache::download : Failed to write %s to disk [%v]", localPath, err.Error()) + log.Err( + "BlockCache::download : Failed to write %s to disk [%v]", + localPath, + err.Error(), + ) _ = os.Remove(localPath) } @@ -1114,7 +1303,11 @@ func (bc *BlockCache) download(item *workItem) { hash := common.GetCRC64(item.block.data, n) err = syscall.Setxattr(localPath, "user.md5sum", hash, 0) if err != nil { - log.Err("BlockCache::download : Failed to set md5sum for file %s [%v]", localPath, err.Error()) + log.Err( + "BlockCache::download : Failed to set md5sum for file %s [%v]", + localPath, + err.Error(), + ) } } } @@ -1124,7 +1317,12 @@ func (bc *BlockCache) download(item *workItem) { item.block.Ready(BlockStatusDownloaded) } -func checkBlockConsistency(blockCache *BlockCache, item *workItem, numberOfBytes int, localPath, fileName string) bool { +func checkBlockConsistency( + blockCache *BlockCache, + item *workItem, + numberOfBytes int, + localPath, fileName string, +) bool { if !blockCache.consistency { return true } @@ -1135,7 +1333,11 @@ func checkBlockConsistency(blockCache *BlockCache, item *workItem, numberOfBytes xattrHash := make([]byte, 8) _, err := syscall.Getxattr(localPath, "user.md5sum", xattrHash) if err != nil { - log.Err("BlockCache::download : Failed to get md5sum for 
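
Note (Linux-only sketch): checkBlockConsistency guards the disk cache by stamping each block file with a checksum stored in an extended attribute, then recomputing and comparing on read. The source stores a CRC64 under the attribute name "user.md5sum" (kept here for fidelity, despite the name); the standard library's hash/crc64 stands in for the internal helper:

// consistency_sketch.go — illustrative only; requires xattr support.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc64"
	"os"
	"syscall"
)

var table = crc64.MakeTable(crc64.ECMA)

func sum(data []byte) []byte {
	out := make([]byte, 8)
	binary.BigEndian.PutUint64(out, crc64.Checksum(data, table))
	return out
}

func main() {
	path := "/tmp/block.bin"
	data := []byte("cached block contents")
	if err := os.WriteFile(path, data, 0o644); err != nil {
		panic(err)
	}
	// Write side: stamp the block with its checksum.
	if err := syscall.Setxattr(path, "user.md5sum", sum(data), 0); err != nil {
		panic(err) // e.g. filesystem without xattr support
	}
	// Read side: recompute and compare before trusting the cached bytes.
	stored := make([]byte, 8)
	if _, err := syscall.Getxattr(path, "user.md5sum", stored); err != nil {
		panic(err)
	}
	fmt.Println("consistent:", bytes.Equal(sum(data), stored)) // consistent: true
}
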
file %s [%v]", fileName, err.Error()) + log.Err( + "BlockCache::download : Failed to get md5sum for file %s [%v]", + fileName, + err.Error(), + ) } else { // Compare checksums if !bytes.Equal(actualHash, xattrHash) { @@ -1163,7 +1365,11 @@ func (bc *BlockCache) WriteFile(options *internal.WriteFileOptions) (int, error) block, err := bc.getOrCreateBlock(options.Handle, uint64(options.Offset)) if err != nil { // Failed to get block for writing - log.Err("BlockCache::WriteFile : Unable to allocate block for %s [%s]", options.Handle.Path, err.Error()) + log.Err( + "BlockCache::WriteFile : Unable to allocate block for %s [%s]", + options.Handle.Path, + err.Error(), + ) return dataWritten, err } @@ -1193,7 +1399,12 @@ func (bc *BlockCache) getOrCreateBlock(handle *handlemap.Handle, offset uint64) // Check the given block index is already available or not index := bc.getBlockIndex(offset) if index >= MAX_BLOCKS { - log.Err("BlockCache::getOrCreateBlock : Failed to get Block %v=>%s offset %v", handle.ID, handle.Path, offset) + log.Err( + "BlockCache::getOrCreateBlock : Failed to get Block %v=>%s offset %v", + handle.ID, + handle.Path, + offset, + ) return nil, fmt.Errorf("block index out of range. Increase your block size") } @@ -1212,7 +1423,13 @@ func (bc *BlockCache) getOrCreateBlock(handle *handlemap.Handle, offset uint64) // Either the block is not fetched yet or offset goes beyond the file size block, err = bc.blockPool.MustGet() if err != nil { - log.Err("BlockCache::getOrCreateBlock : Unable to allocate block %v=>%s (index %v) %v", handle.ID, handle.Path, index, err) + log.Err( + "BlockCache::getOrCreateBlock : Unable to allocate block %v=>%s (index %v) %v", + handle.ID, + handle.Path, + index, + err, + ) return nil, err } @@ -1226,10 +1443,20 @@ func (bc *BlockCache) getOrCreateBlock(handle *handlemap.Handle, offset uint64) // if a block has been staged and deleted from the buffer list, then we should commit the existing blocks // commit the dirty blocks and download the given block if shouldCommit { - log.Debug("BlockCache::getOrCreateBlock : Fetching an uncommitted block %v, so committing all the staged blocks for %v=>%s", block.id, handle.ID, handle.Path) + log.Debug( + "BlockCache::getOrCreateBlock : Fetching an uncommitted block %v, so committing all the staged blocks for %v=>%s", + block.id, + handle.ID, + handle.Path, + ) err = bc.commitBlocks(handle) if err != nil { - log.Err("BlockCache::getOrCreateBlock : Failed to commit blocks for %v=>%s [%s]", handle.ID, handle.Path, err.Error()) + log.Err( + "BlockCache::getOrCreateBlock : Failed to commit blocks for %v=>%s [%s]", + handle.ID, + handle.Path, + err.Error(), + ) return nil, err } } @@ -1239,7 +1466,12 @@ func (bc *BlockCache) getOrCreateBlock(handle *handlemap.Handle, offset uint64) // - it was committed by the above commit blocks operation if shouldDownload || shouldCommit { // We are writing somewhere in between so just fetch this block - log.Debug("BlockCache::getOrCreateBlock : Downloading block %v for %v=>%v", block.id, handle.ID, handle.Path) + log.Debug( + "BlockCache::getOrCreateBlock : Downloading block %v for %v=>%v", + block.id, + handle.ID, + handle.Path, + ) bc.lineupDownload(handle, block, false) // Now wait for download to complete @@ -1247,7 +1479,12 @@ func (bc *BlockCache) getOrCreateBlock(handle *handlemap.Handle, offset uint64) // if the block failed to download, it can't be used for overwriting if block.IsFailed() { - log.Err("BlockCache::getOrCreateBlock : Failed to download block %v for %v=>%s", 
block.id, handle.ID, handle.Path) + log.Err( + "BlockCache::getOrCreateBlock : Failed to download block %v for %v=>%s", + block.id, + handle.ID, + handle.Path, + ) // Remove this node from handle so that next read retries to download the block again bc.releaseDownloadFailedBlock(handle, block) @@ -1269,7 +1506,11 @@ func (bc *BlockCache) getOrCreateBlock(handle *handlemap.Handle, offset uint64) if handle.Buffers.Cooking.Len() > MIN_WRITE_BLOCK { err = bc.stageBlocks(handle, 1) if err != nil { - log.Err("BlockCache::getOrCreateBlock : Unable to stage blocks for %s [%s]", handle.Path, err.Error()) + log.Err( + "BlockCache::getOrCreateBlock : Unable to stage blocks for %s [%s]", + handle.Path, + err.Error(), + ) } } @@ -1373,7 +1614,13 @@ func (bc *BlockCache) printCooking(handle *handlemap.Handle) { //nolint cookedId = append(cookedId, block.id) node = nextNode } - log.Debug("BlockCache::printCookingnCooked : %v=>%s \n Cooking: [%v] \n Cooked: [%v]", handle.ID, handle.Path, cookingId, cookedId) + log.Debug( + "BlockCache::printCookingnCooked : %v=>%s \n Cooking: [%v] \n Cooked: [%v]", + handle.ID, + handle.Path, + cookingId, + cookedId, + ) } @@ -1402,7 +1649,11 @@ func shouldCommitAndDownload(blockID int64, handle *handlemap.Handle) (bool, boo } // lineupUpload : Create a work item and schedule the upload -func (bc *BlockCache) lineupUpload(handle *handlemap.Handle, block *Block, listMap map[int64]*blockInfo) { +func (bc *BlockCache) lineupUpload( + handle *handlemap.Handle, + block *Block, + listMap map[int64]*blockInfo, +) { id := common.GetBlockID(common.BlockIDLength) listMap[block.id] = &blockInfo{ id: id, @@ -1410,7 +1661,14 @@ func (bc *BlockCache) lineupUpload(handle *handlemap.Handle, block *Block, listM size: bc.getBlockSize(uint64(handle.Size), block), } - log.Debug("BlockCache::lineupUpload : block %v, size %v for %v=>%s, blockId %v", block.id, bc.getBlockSize(uint64(handle.Size), block), handle.ID, handle.Path, id) + log.Debug( + "BlockCache::lineupUpload : block %v, size %v for %v=>%s, blockId %v", + block.id, + bc.getBlockSize(uint64(handle.Size), block), + handle.ID, + handle.Path, + id, + ) item := &workItem{ handle: handle, block: block, @@ -1436,10 +1694,7 @@ func (bc *BlockCache) waitAndFreeUploadedBlocks(handle *handlemap.Handle, cnt in node := nodeList.Front() nextNode := node - wipeoutBlock := false - if cnt == 1 { - wipeoutBlock = true - } + wipeoutBlock := cnt == 1 for nextNode != nil && cnt > 0 { node = nextNode @@ -1460,14 +1715,26 @@ func (bc *BlockCache) waitAndFreeUploadedBlocks(handle *handlemap.Handle, cnt in } if block.IsFailed() { - log.Err("BlockCache::waitAndFreeUploadedBlocks : Failed to upload block, posting back to cooking list %v=>%s (index %v, offset %v)", handle.ID, handle.Path, block.id, block.offset) + log.Err( + "BlockCache::waitAndFreeUploadedBlocks : Failed to upload block, posting back to cooking list %v=>%s (index %v, offset %v)", + handle.ID, + handle.Path, + block.id, + block.offset, + ) bc.addToCooking(handle, block) continue } cnt-- if wipeoutBlock || block.id == -1 { - log.Debug("BlockCache::waitAndFreeUploadedBlocks : Block cleanup for block %v=>%s (index %v, offset %v)", handle.ID, handle.Path, block.id, block.offset) + log.Debug( + "BlockCache::waitAndFreeUploadedBlocks : Block cleanup for block %v=>%s (index %v, offset %v)", + handle.ID, + handle.Path, + block.id, + block.offset, + ) handle.RemoveValue(fmt.Sprintf("%v", block.id)) nodeList.Remove(node) block.node = nil @@ -1495,12 +1762,24 @@ func (bc *BlockCache) upload(item 
*workItem) {
Id: item.blockId})
if err != nil {
// Fail to write the data so just reschedule this request
- log.Err("BlockCache::upload : Failed to write %v=>%s from offset %v [%s]", item.handle.ID, item.handle.Path, item.block.id, err.Error())
+ log.Err(
+ "BlockCache::upload : Failed to write %v=>%s from offset %v [%s]",
+ item.handle.ID,
+ item.handle.Path,
+ item.block.id,
+ err.Error(),
+ )
item.failCnt++
if item.failCnt > MAX_FAIL_CNT {
// If we failed to write the data 3 times then just give up
- log.Err("BlockCache::upload : 3 attempts to upload a block have failed %v=>%s (index %v, offset %v)", item.handle.ID, item.handle.Path, item.block.id, item.block.offset)
+ log.Err(
+ "BlockCache::upload : 3 attempts to upload a block have failed %v=>%s (index %v, offset %v)",
+ item.handle.ID,
+ item.handle.Path,
+ item.block.id,
+ item.block.offset,
+ )
item.block.Failed()
item.block.Ready(BlockStatusUploadFailed)
return
@@ -1515,7 +1794,11 @@ func (bc *BlockCache) upload(item *workItem) {
err := os.MkdirAll(filepath.Dir(localPath), 0777)
if err != nil {
- log.Err("BlockCache::upload : error creating directory structure for file %s [%s]", localPath, err.Error())
+ log.Err(
+ "BlockCache::upload : error creating directory structure for file %s [%s]",
+ localPath,
+ err.Error(),
+ )
goto return_safe
}
@@ -1524,7 +1807,11 @@ func (bc *BlockCache) upload(item *workItem) {
if err == nil {
_, err := f.Write(item.block.data[0:blockSize])
if err != nil {
- log.Err("BlockCache::upload : Failed to write %s to disk [%v]", localPath, err.Error())
+ log.Err(
+ "BlockCache::upload : Failed to write %s to disk [%v]",
+ localPath,
+ err.Error(),
+ )
_ = os.Remove(localPath)
goto return_safe
}
@@ -1543,7 +1830,11 @@ func (bc *BlockCache) upload(item *workItem) {
hash := common.GetCRC64(item.block.data, int(blockSize))
err = syscall.Setxattr(localPath, "user.md5sum", hash, 0)
if err != nil {
- log.Err("BlockCache::download : Failed to set md5sum for file %s [%v]", localPath, err.Error())
+ log.Err(
+ "BlockCache::upload : Failed to set md5sum for file %s [%v]",
+ localPath,
+ err.Error(),
+ )
}
}
}
@@ -1569,7 +1860,11 @@ func (bc *BlockCache) commitBlocks(handle *handlemap.Handle) error {
err := bc.stageBlocks(handle, MAX_BLOCKS)
if err != nil {
- log.Err("BlockCache::commitBlocks : Failed to stage blocks for %s [%s]", handle.Path, err.Error())
+ log.Err(
+ "BlockCache::commitBlocks : Failed to stage blocks for %s [%s]",
+ handle.Path,
+ err.Error(),
+ )
return err
}
@@ -1584,7 +1879,10 @@ func (bc *BlockCache) commitBlocks(handle *handlemap.Handle) error {
node = node.Next()
if block.IsDirty() {
- log.Err("BlockCache::commitBlocks : Failed to stage blocks for %s after 3 attempts", handle.Path)
+ log.Err(
+ "BlockCache::commitBlocks : Failed to stage blocks for %s after 3 attempts",
+ handle.Path,
+ )
return fmt.Errorf("failed to stage blocks")
}
}
@@ -1592,17 +1890,26 @@ func (bc *BlockCache) commitBlocks(handle *handlemap.Handle) error {
blockIDList, restageIds, err := bc.getBlockIDList(handle)
if err != nil {
- log.Err("BlockCache::commitBlocks : Failed to get block id list for %v [%v]", handle.Path, err.Error())
+ log.Err(
+ "BlockCache::commitBlocks : Failed to get block id list for %v [%v]",
+ handle.Path,
+ err.Error(),
+ )
return err
}
log.Debug("BlockCache::commitBlocks : Committing blocks for %s", handle.Path)
// Commit the block list now
- var newEtag string = ""
- err = bc.NextComponent().CommitData(internal.CommitDataOptions{Name: handle.Path, List: blockIDList, BlockSize: bc.blockSize,
NewETag: &newEtag}) + var newEtag = "" + err = bc.NextComponent(). + CommitData(internal.CommitDataOptions{Name: handle.Path, List: blockIDList, BlockSize: bc.blockSize, NewETag: &newEtag}) if err != nil { - log.Err("BlockCache::commitBlocks : Failed to commit blocks for %s [%s]", handle.Path, err.Error()) + log.Err( + "BlockCache::commitBlocks : Failed to commit blocks for %s [%s]", + handle.Path, + err.Error(), + ) return err } @@ -1629,7 +1936,11 @@ func (bc *BlockCache) commitBlocks(handle *handlemap.Handle) error { // Commit the block list again block, err := bc.getOrCreateBlock(handle, uint64(i)*bc.blockSize) if err != nil { - log.Err("BlockCache::commitBlocks : Failed to get block for %v [%v]", handle.Path, err.Error()) + log.Err( + "BlockCache::commitBlocks : Failed to get block for %v [%v]", + handle.Path, + err.Error(), + ) return err } @@ -1683,7 +1994,13 @@ func (bc *BlockCache) getBlockIDList(handle *handlemap.Handle) ([]string, []stri fillerSize := (bc.blockSize - listMap[offsets[i]].size) fillerOffset := uint64(offsets[i]*int64(bc.blockSize)) + listMap[offsets[i]].size - log.Debug("BlockCache::getBlockIDList : Staging semi zero block for %v=>%v offset %v, size %v", handle.ID, handle.Path, fillerOffset, fillerSize) + log.Debug( + "BlockCache::getBlockIDList : Staging semi zero block for %v=>%v offset %v, size %v", + handle.ID, + handle.Path, + fillerOffset, + fillerSize, + ) err := bc.NextComponent().StageData(internal.StageDataOptions{ Name: handle.Path, Data: bc.blockPool.zeroBlock.data[:fillerSize], @@ -1691,19 +2008,38 @@ func (bc *BlockCache) getBlockIDList(handle *handlemap.Handle) ([]string, []stri }) if err != nil { - log.Err("BlockCache::getBlockIDList : Failed to write semi zero block for %v=>%v [%s]", handle.ID, handle.Path, err.Error()) + log.Err( + "BlockCache::getBlockIDList : Failed to write semi zero block for %v=>%v [%s]", + handle.ID, + handle.Path, + err.Error(), + ) return nil, nil, err } blockIDList = append(blockIDList, listMap[offsets[i]].id) - log.Debug("BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v : %v, size %v)", handle.ID, handle.Path, offsets[i], listMap[offsets[i]].id, listMap[offsets[i]].size) + log.Debug( + "BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v : %v, size %v)", + handle.ID, + handle.Path, + offsets[i], + listMap[offsets[i]].id, + listMap[offsets[i]].size, + ) // After the flush call we need to merge this particular block with the next block (semi zero block) restageId = append(restageId, listMap[offsets[i]].id) // Add the semi zero block to the list blockIDList = append(blockIDList, id) - log.Debug("BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v : %v, size %v)", handle.ID, handle.Path, fillerOffset, id, fillerSize) + log.Debug( + "BlockCache::getBlockIDList : Preparing blocklist for %v=>%s (%v : %v, size %v)", + handle.ID, + handle.Path, + fillerOffset, + id, + fillerSize, + ) index++ i++ @@ -1745,13 +2081,26 @@ func (bc *BlockCache) getBlockIDList(handle *handlemap.Handle) ([]string, []stri func (bc *BlockCache) stageZeroBlock(handle *handlemap.Handle, tryCnt int) (string, error) { if tryCnt > MAX_FAIL_CNT { // If we failed to write the data 3 times then just give up - log.Err("BlockCache::stageZeroBlock : 3 attempts to upload zero block have failed %v=>%v", handle.ID, handle.Path) - return "", fmt.Errorf("3 attempts to upload zero block have failed for %v=>%v", handle.ID, handle.Path) + log.Err( + "BlockCache::stageZeroBlock : 3 attempts to upload zero block have failed %v=>%v", + 
handle.ID, + handle.Path, + ) + return "", fmt.Errorf( + "3 attempts to upload zero block have failed for %v=>%v", + handle.ID, + handle.Path, + ) } id := common.GetBlockID(common.BlockIDLength) - log.Debug("BlockCache::stageZeroBlock : Staging zero block for %v=>%v, try = %v", handle.ID, handle.Path, tryCnt) + log.Debug( + "BlockCache::stageZeroBlock : Staging zero block for %v=>%v, try = %v", + handle.ID, + handle.Path, + tryCnt, + ) err := bc.NextComponent().StageData(internal.StageDataOptions{ Name: handle.Path, Data: bc.blockPool.zeroBlock.data[:], @@ -1759,11 +2108,22 @@ func (bc *BlockCache) stageZeroBlock(handle *handlemap.Handle, tryCnt int) (stri }) if err != nil { - log.Err("BlockCache::stageZeroBlock : Failed to write zero block for %v=>%v, try %v [%v]", handle.ID, handle.Path, tryCnt, err.Error()) + log.Err( + "BlockCache::stageZeroBlock : Failed to write zero block for %v=>%v, try %v [%v]", + handle.ID, + handle.Path, + tryCnt, + err.Error(), + ) return bc.stageZeroBlock(handle, tryCnt+1) } - log.Debug("BlockCache::stageZeroBlock : Zero block id for %v=>%v = %v", handle.ID, handle.Path, id) + log.Debug( + "BlockCache::stageZeroBlock : Zero block id for %v=>%v = %v", + handle.ID, + handle.Path, + id, + ) return id, nil } @@ -1914,7 +2274,9 @@ func (bc *BlockCache) RenameFile(options internal.RenameFileOptions) error { func (bc *BlockCache) SyncFile(options internal.SyncFileOptions) error { log.Trace("BlockCache::SyncFile : handle=%d, path=%s", options.Handle.ID, options.Handle.Path) - err := bc.FlushFile(internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true}) //nolint + err := bc.FlushFile( + internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true}, + ) //nolint if err != nil { log.Err("BlockCache::SyncFile : failed to flush file %s", options.Handle.Path) return err @@ -1968,30 +2330,66 @@ func NewBlockCacheComponent() internal.Component { func init() { internal.AddComponent(compName, NewBlockCacheComponent) - blockSizeMb := config.AddFloat64Flag("block-cache-block-size", 0.0, "Size (in MB) of a block to be downloaded for block-cache.") + blockSizeMb := config.AddFloat64Flag( + "block-cache-block-size", + 0.0, + "Size (in MB) of a block to be downloaded for block-cache.", + ) config.BindPFlag(compName+".block-size-mb", blockSizeMb) - blockPoolMb := config.AddUint64Flag("block-cache-pool-size", 0, "Size (in MB) of total memory preallocated for block-cache.") + blockPoolMb := config.AddUint64Flag( + "block-cache-pool-size", + 0, + "Size (in MB) of total memory preallocated for block-cache.", + ) config.BindPFlag(compName+".mem-size-mb", blockPoolMb) - blockCachePath := config.AddStringFlag("block-cache-path", "", "Path to store downloaded blocks.") + blockCachePath := config.AddStringFlag( + "block-cache-path", + "", + "Path to store downloaded blocks.", + ) config.BindPFlag(compName+".path", blockCachePath) - blockDiskMb := config.AddUint64Flag("block-cache-disk-size", 0, "Size (in MB) of total disk capacity that block-cache can use.") + blockDiskMb := config.AddUint64Flag( + "block-cache-disk-size", + 0, + "Size (in MB) of total disk capacity that block-cache can use.", + ) config.BindPFlag(compName+".disk-size-mb", blockDiskMb) - blockDiskTimeout := config.AddUint32Flag("block-cache-disk-timeout", 0, "Timeout (in seconds) for which persisted data remains in disk cache.") + blockDiskTimeout := config.AddUint32Flag( + "block-cache-disk-timeout", + 0, + "Timeout (in seconds) for which persisted data remains in disk cache.", + ) 
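// Illustrative sketch (not part of this patch): every registration above follows
// the same two-step pattern, create a CLI flag and then bind it to a component
// config key so the value can come from either the command line or the config
// file. A hypothetical component would wire a new option the same way; only the
// config.AddUint32Flag and config.BindPFlag helpers already used in this file
// are assumed, and the flag name, key, and default below are made up.
func init() {
	exampleRetries := config.AddUint32Flag(
		"example-retry-count",
		3,
		"Number of retries before an example operation is failed.",
	)
	config.BindPFlag("example"+".retry-count", exampleRetries)
}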
config.BindPFlag(compName+".disk-timeout-sec", blockDiskTimeout) - blockCachePrefetch := config.AddUint32Flag("block-cache-prefetch", 0, "Max number of blocks to prefetch.") + blockCachePrefetch := config.AddUint32Flag( + "block-cache-prefetch", + 0, + "Max number of blocks to prefetch.", + ) config.BindPFlag(compName+".prefetch", blockCachePrefetch) - blockParallelism := config.AddUint32Flag("block-cache-parallelism", 128, "Number of worker thread responsible for upload/download jobs.") + blockParallelism := config.AddUint32Flag( + "block-cache-parallelism", + 128, + "Number of worker thread responsible for upload/download jobs.", + ) config.BindPFlag(compName+".parallelism", blockParallelism) - blockCachePrefetchOnOpen := config.AddBoolFlag("block-cache-prefetch-on-open", false, "Start prefetching on open or wait for first read.") + blockCachePrefetchOnOpen := config.AddBoolFlag( + "block-cache-prefetch-on-open", + false, + "Start prefetching on open or wait for first read.", + ) config.BindPFlag(compName+".prefetch-on-open", blockCachePrefetchOnOpen) - strongConsistency := config.AddBoolFlag("block-cache-strong-consistency", false, "Enable strong data consistency for block cache.") + strongConsistency := config.AddBoolFlag( + "block-cache-strong-consistency", + false, + "Enable strong data consistency for block cache.", + ) config.BindPFlag(compName+".consistency", strongConsistency) } diff --git a/component/block_cache/blockpool.go b/component/block_cache/blockpool.go index 70c5b51c54..8a1281a5da 100644 --- a/component/block_cache/blockpool.go +++ b/component/block_cache/blockpool.go @@ -154,8 +154,12 @@ func (pool *BlockPool) MustGet() (*Block, error) { break // Return error in case no blocks are available after default timeout case <-defaultTimeout: - err := fmt.Errorf("Failed to Allocate Buffer, Len (priorityCh: %d, blockCh: %d), MaxBlocks: %d", - len(pool.priorityCh), len(pool.blocksCh), pool.maxBlocks) + err := fmt.Errorf( + "Failed to Allocate Buffer, Len (priorityCh: %d, blockCh: %d), MaxBlocks: %d", + len(pool.priorityCh), + len(pool.blocksCh), + pool.maxBlocks, + ) log.Err("BlockPool::MustGet : %v", err) return nil, err } diff --git a/component/block_cache/stream.go b/component/block_cache/stream.go index 590b832e9d..b2f991b4fd 100644 --- a/component/block_cache/stream.go +++ b/component/block_cache/stream.go @@ -52,14 +52,14 @@ type Stream struct { } type StreamOptions struct { - BlockSize uint64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` + BlockSize uint64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` BufferSize uint64 `config:"buffer-size-mb" yaml:"buffer-size-mb,omitempty"` - CachedObjLimit uint64 `config:"max-buffers" yaml:"max-buffers,omitempty"` - FileCaching bool `config:"file-caching" yaml:"file-caching,omitempty"` - readOnly bool `config:"read-only" yaml:"-"` + CachedObjLimit uint64 `config:"max-buffers" yaml:"max-buffers,omitempty"` + FileCaching bool `config:"file-caching" yaml:"file-caching,omitempty"` + readOnly bool `config:"read-only" yaml:"-"` // v1 support - StreamCacheMb uint64 `config:"stream-cache-mb" yaml:"-"` + StreamCacheMb uint64 `config:"stream-cache-mb" yaml:"-"` MaxBlocksPerFile uint64 `config:"max-blocks-per-file" yaml:"-"` } @@ -102,12 +102,22 @@ func (st *Stream) Configure(_ bool) error { } if uint64((conf.BufferSize*conf.CachedObjLimit)*mb) > memory.FreeMemory() { - log.Err("Stream::Configure : config error, not enough free memory for provided configuration") + log.Err( + "Stream::Configure : config error, not enough free 
memory for provided configuration", + ) return errors.New("not enough free memory for provided stream configuration") } - log.Info("Stream to Block Cache::Configure : Buffer size %v, Block size %v, Handle limit %v, FileCaching %v, Read-only %v, StreamCacheMb %v, MaxBlocksPerFile %v", - conf.BufferSize, conf.BlockSize, conf.CachedObjLimit, conf.FileCaching, conf.readOnly, conf.StreamCacheMb, conf.MaxBlocksPerFile) + log.Info( + "Stream to Block Cache::Configure : Buffer size %v, Block size %v, Handle limit %v, FileCaching %v, Read-only %v, StreamCacheMb %v, MaxBlocksPerFile %v", + conf.BufferSize, + conf.BlockSize, + conf.CachedObjLimit, + conf.FileCaching, + conf.readOnly, + conf.StreamCacheMb, + conf.MaxBlocksPerFile, + ) if conf.BlockSize > 0 { config.Set(compName+".block-size-mb", fmt.Sprint(conf.BlockSize)) @@ -123,14 +133,26 @@ func (st *Stream) Configure(_ bool) error { // On init register this component to pipeline and supply your constructor func init() { - blockSizeMb := config.AddFloat64Flag("block-size-mb", 0.0, "Size (in MB) of a block to be downloaded during streaming.") + blockSizeMb := config.AddFloat64Flag( + "block-size-mb", + 0.0, + "Size (in MB) of a block to be downloaded during streaming.", + ) config.BindPFlag(compStream+".block-size-mb", blockSizeMb) - maxBlocksMb := config.AddIntFlag("max-blocks-per-file", 0, "Maximum number of blocks to be cached in memory for streaming.") + maxBlocksMb := config.AddIntFlag( + "max-blocks-per-file", + 0, + "Maximum number of blocks to be cached in memory for streaming.", + ) config.BindPFlag(compStream+".max-blocks-per-file", maxBlocksMb) maxBlocksMb.Hidden = true - streamCacheSize := config.AddUint64Flag("stream-cache-mb", 0, "Limit total amount of data being cached in memory to conserve memory footprint of blobfuse.") + streamCacheSize := config.AddUint64Flag( + "stream-cache-mb", + 0, + "Limit total amount of data being cached in memory to conserve memory footprint of blobfuse.", + ) config.BindPFlag(compStream+".stream-cache-mb", streamCacheSize) streamCacheSize.Hidden = true } diff --git a/component/custom/custom.go b/component/custom/custom.go index 253340f6c6..1d1b2baec7 100644 --- a/component/custom/custom.go +++ b/component/custom/custom.go @@ -72,14 +72,28 @@ func initializePlugins() error { getExternalComponentFunc, err := p.Lookup("GetExternalComponent") if err != nil { - log.Err("initializePlugins: GetExternalComponent function lookup error in plugin %s: %s", file, err.Error()) - return fmt.Errorf("GetExternalComponent function lookup error in plugin %s: %s", file, err.Error()) + log.Err( + "initializePlugins: GetExternalComponent function lookup error in plugin %s: %s", + file, + err.Error(), + ) + return fmt.Errorf( + "GetExternalComponent function lookup error in plugin %s: %s", + file, + err.Error(), + ) } getExternalComponent, ok := getExternalComponentFunc.(func() (string, func() exported.Component)) if !ok { - log.Err("initializePlugins: GetExternalComponent function in %s has some incorrect definition", file) - return fmt.Errorf("GetExternalComponent function in %s has some incorrect definition", file) + log.Err( + "initializePlugins: GetExternalComponent function in %s has some incorrect definition", + file, + ) + return fmt.Errorf( + "GetExternalComponent function in %s has some incorrect definition", + file, + ) } compName, initExternalComponent := getExternalComponent() diff --git a/component/entry_cache/entry_cache.go b/component/entry_cache/entry_cache.go index 7035e4e578..23331a9825 100644 --- 
a/component/entry_cache/entry_cache.go
+++ b/component/entry_cache/entry_cache.go
@@ -126,7 +126,9 @@ func (c *EntryCache) Configure(_ bool) error {
}
if !readonly {
- log.Err("EntryCache::Configure : EntryCache component should be used only in read-only mode")
+ log.Err(
+ "EntryCache::Configure : EntryCache component should be used only in read-only mode",
+ )
return fmt.Errorf("EntryCache component should be used only in read-only mode")
}
@@ -155,7 +157,9 @@ func (c *EntryCache) Configure(_ bool) error {
}
// StreamDir : Optionally cache entries of the list
-func (c *EntryCache) StreamDir(options internal.StreamDirOptions) ([]*internal.ObjAttr, string, error) {
+func (c *EntryCache) StreamDir(
+ options internal.StreamDirOptions,
+) ([]*internal.ObjAttr, string, error) {
log.Trace("AttrCache::StreamDir : %s", options.Name)
pathKey := fmt.Sprintf("%s##%s", options.Name, options.Token)
@@ -165,7 +169,11 @@ func (c *EntryCache) StreamDir(options internal.StreamDirOptions) ([]*internal.O
pathEntry, found := c.pathMap.Load(pathKey)
if !found {
- log.Debug("EntryCache::StreamDir : Cache not valid, fetch new list for path: %s, token %s", options.Name, options.Token)
+ log.Debug(
+ "EntryCache::StreamDir : Cache not valid, fetch new list for path: %s, token %s",
+ options.Name,
+ options.Token,
+ )
pathList, token, err := c.NextComponent().StreamDir(options)
if err == nil && len(pathList) > 0 {
item := pathCacheItem{
@@ -209,6 +217,10 @@ func NewEntryCacheComponent() internal.Component {
func init() {
internal.AddComponent(compName, NewEntryCacheComponent)
- entryTimeout := config.AddUint32Flag("list-cache-timeout", defaultEntryCacheTimeout, "list entry timeout")
+ entryTimeout := config.AddUint32Flag(
+ "list-cache-timeout",
+ defaultEntryCacheTimeout,
+ "list entry timeout",
+ )
config.BindPFlag(compName+".timeout-sec", entryTimeout)
}
diff --git a/component/file_cache/cache_policy.go b/component/file_cache/cache_policy.go
index 80407f3f84..35283172d2 100644
--- a/component/file_cache/cache_policy.go
+++ b/component/file_cache/cache_policy.go
@@ -82,7 +82,11 @@ func getUsagePercentage(path string, maxSize float64) float64 {
if maxSize == 0 {
currSize, usagePercent, err = common.GetDiskUsageFromStatfs(path)
if err != nil {
- log.Err("cachePolicy::getUsagePercentage : failed to get disk usage for %s [%v]", path, err.Error)
+ log.Err(
+ "cachePolicy::getUsagePercentage : failed to get disk usage for %s [%v]",
+ path,
+ err.Error(),
+ )
}
} else {
// We need to compute % usage of temp directory against configured limit
@@ -96,8 +100,16 @@ func getUsagePercentage(path string, maxSize float64) float64 {
log.Debug("cachePolicy::getUsagePercentage : current cache usage : %f%%", usagePercent)
- fileCacheStatsCollector.UpdateStats(stats_manager.Replace, cacheUsage, fmt.Sprintf("%f MB", currSize))
- fileCacheStatsCollector.UpdateStats(stats_manager.Replace, usgPer, fmt.Sprintf("%f%%", usagePercent))
+ fileCacheStatsCollector.UpdateStats(
+ stats_manager.Replace,
+ cacheUsage,
+ fmt.Sprintf("%f MB", currSize),
+ )
+ fileCacheStatsCollector.UpdateStats(
+ stats_manager.Replace,
+ usgPer,
+ fmt.Sprintf("%f%%", usagePercent),
+ )
return usagePercent
}
diff --git a/component/file_cache/file_cache.go b/component/file_cache/file_cache.go
index 6c48d949d2..90c014dfb7 100644
--- a/component/file_cache/file_cache.go
+++ b/component/file_cache/file_cache.go
@@ -89,31 +89,31 @@ type FileCache struct {
// Structure defining your config parameters
type FileCacheOptions struct {
// e.g.
var1 uint32 `config:"var1"` - TmpPath string `config:"path" yaml:"path,omitempty"` + TmpPath string `config:"path" yaml:"path,omitempty"` Policy string `config:"policy" yaml:"policy,omitempty"` - Timeout uint32 `config:"timeout-sec" yaml:"timeout-sec,omitempty"` + Timeout uint32 `config:"timeout-sec" yaml:"timeout-sec,omitempty"` MaxEviction uint32 `config:"max-eviction" yaml:"max-eviction,omitempty"` - MaxSizeMB float64 `config:"max-size-mb" yaml:"max-size-mb,omitempty"` + MaxSizeMB float64 `config:"max-size-mb" yaml:"max-size-mb,omitempty"` HighThreshold uint32 `config:"high-threshold" yaml:"high-threshold,omitempty"` - LowThreshold uint32 `config:"low-threshold" yaml:"low-threshold,omitempty"` + LowThreshold uint32 `config:"low-threshold" yaml:"low-threshold,omitempty"` - CreateEmptyFile bool `config:"create-empty-file" yaml:"create-empty-file,omitempty"` + CreateEmptyFile bool `config:"create-empty-file" yaml:"create-empty-file,omitempty"` AllowNonEmpty bool `config:"allow-non-empty-temp" yaml:"allow-non-empty-temp,omitempty"` - CleanupOnStart bool `config:"cleanup-on-start" yaml:"cleanup-on-start,omitempty"` + CleanupOnStart bool `config:"cleanup-on-start" yaml:"cleanup-on-start,omitempty"` EnablePolicyTrace bool `config:"policy-trace" yaml:"policy-trace,omitempty"` - OffloadIO bool `config:"offload-io" yaml:"offload-io,omitempty"` + OffloadIO bool `config:"offload-io" yaml:"offload-io,omitempty"` // v1 support V1Timeout uint32 `config:"file-cache-timeout-in-seconds" yaml:"-"` - EmptyDirCheck bool `config:"empty-dir-check" yaml:"-"` - SyncToFlush bool `config:"sync-to-flush" yaml:"sync-to-flush,omitempty"` - SyncNoOp bool `config:"ignore-sync" yaml:"ignore-sync,omitempty"` + EmptyDirCheck bool `config:"empty-dir-check" yaml:"-"` + SyncToFlush bool `config:"sync-to-flush" yaml:"sync-to-flush,omitempty"` + SyncNoOp bool `config:"ignore-sync" yaml:"ignore-sync,omitempty"` RefreshSec uint32 `config:"refresh-sec" yaml:"refresh-sec,omitempty"` - HardLimit bool `config:"hard-limit" yaml:"hard-limit,omitempty"` + HardLimit bool `config:"hard-limit" yaml:"hard-limit,omitempty"` } const ( @@ -131,55 +131,55 @@ var _ internal.Component = &FileCache{} var fileCacheStatsCollector *stats_manager.StatsCollector -func (c *FileCache) Name() string { +func (fc *FileCache) Name() string { return compName } -func (c *FileCache) SetName(name string) { - c.BaseComponent.SetName(name) +func (fc *FileCache) SetName(name string) { + fc.BaseComponent.SetName(name) } -func (c *FileCache) SetNextComponent(nc internal.Component) { - c.BaseComponent.SetNextComponent(nc) +func (fc *FileCache) SetNextComponent(nc internal.Component) { + fc.BaseComponent.SetNextComponent(nc) } -func (c *FileCache) Priority() internal.ComponentPriority { +func (fc *FileCache) Priority() internal.ComponentPriority { return internal.EComponentPriority.LevelMid() } // Start : Pipeline calls this method to start the component functionality // // this shall not block the call otherwise pipeline will not start -func (c *FileCache) Start(ctx context.Context) error { - log.Trace("Starting component : %s", c.Name()) +func (fc *FileCache) Start(ctx context.Context) error { + log.Trace("Starting component : %s", fc.Name()) - if c.policy == nil { - return fmt.Errorf("config error in %s error [cache policy missing]", c.Name()) + if fc.policy == nil { + return fmt.Errorf("config error in %s error [cache policy missing]", fc.Name()) } - err := c.policy.StartPolicy() + err := fc.policy.StartPolicy() if err != nil { - return fmt.Errorf("config error 
in %s error [fail to start policy]", c.Name()) + return fmt.Errorf("config error in %s error [fail to start policy]", fc.Name()) } // create stats collector for file cache - fileCacheStatsCollector = stats_manager.NewStatsCollector(c.Name()) + fileCacheStatsCollector = stats_manager.NewStatsCollector(fc.Name()) return nil } // Stop : Stop the component functionality and kill all threads started -func (c *FileCache) Stop() error { - log.Trace("Stopping component : %s", c.Name()) +func (fc *FileCache) Stop() error { + log.Trace("Stopping component : %s", fc.Name()) // Wait for all async upload to complete if any - if c.lazyWrite { + if fc.lazyWrite { log.Info("FileCache::Stop : Waiting for async close to complete") - c.fileCloseOpt.Wait() + fc.fileCloseOpt.Wait() } - _ = c.policy.ShutdownPolicy() - _ = common.TempCacheCleanup(c.tmpPath) + _ = fc.policy.ShutdownPolicy() + _ = common.TempCacheCleanup(fc.tmpPath) fileCacheStatsCollector.Destroy() @@ -187,11 +187,11 @@ func (c *FileCache) Stop() error { } // GenConfig : Generate default config for the component -func (c *FileCache) GenConfig() string { +func (fc *FileCache) GenConfig() string { log.Info("FileCache::Configure : config generation started") var sb strings.Builder - sb.WriteString(fmt.Sprintf("\n%s:", c.Name())) + sb.WriteString(fmt.Sprintf("\n%s:", fc.Name())) tmpPath := "" _ = config.UnmarshalKey("tmp-path", &tmpPath) @@ -213,121 +213,129 @@ func (c *FileCache) GenConfig() string { // Configure : Pipeline will call this method after constructor so that you can read config and initialize yourself // // Return failure if any config is not valid to exit the process -func (c *FileCache) Configure(_ bool) error { - log.Trace("FileCache::Configure : %s", c.Name()) +func (fc *FileCache) Configure(_ bool) error { + log.Trace("FileCache::Configure : %s", fc.Name()) conf := FileCacheOptions{} err := config.UnmarshalKey(compName, &conf) if err != nil { log.Err("FileCache: config error [invalid config attributes]") - return fmt.Errorf("config error in %s [%s]", c.Name(), err.Error()) + return fmt.Errorf("config error in %s [%s]", fc.Name(), err.Error()) } - c.createEmptyFile = conf.CreateEmptyFile + fc.createEmptyFile = conf.CreateEmptyFile if config.IsSet(compName + ".file-cache-timeout-in-seconds") { - c.cacheTimeout = float64(conf.V1Timeout) + fc.cacheTimeout = float64(conf.V1Timeout) } else if config.IsSet(compName + ".timeout-sec") { - c.cacheTimeout = float64(conf.Timeout) + fc.cacheTimeout = float64(conf.Timeout) } else { - c.cacheTimeout = float64(defaultFileCacheTimeout) + fc.cacheTimeout = float64(defaultFileCacheTimeout) } directIO := false _ = config.UnmarshalKey("direct-io", &directIO) if directIO { - c.cacheTimeout = 0 + fc.cacheTimeout = 0 log.Crit("FileCache::Configure : Direct IO mode enabled, cache timeout is set to 0") } if config.IsSet(compName + ".empty-dir-check") { - c.allowNonEmpty = !conf.EmptyDirCheck + fc.allowNonEmpty = !conf.EmptyDirCheck } else { - c.allowNonEmpty = conf.AllowNonEmpty + fc.allowNonEmpty = conf.AllowNonEmpty } - c.policyTrace = conf.EnablePolicyTrace - c.offloadIO = conf.OffloadIO - c.syncToFlush = conf.SyncToFlush - c.syncToDelete = !conf.SyncNoOp - c.refreshSec = conf.RefreshSec - c.hardLimit = true + fc.policyTrace = conf.EnablePolicyTrace + fc.offloadIO = conf.OffloadIO + fc.syncToFlush = conf.SyncToFlush + fc.syncToDelete = !conf.SyncNoOp + fc.refreshSec = conf.RefreshSec + fc.hardLimit = true - err = config.UnmarshalKey("lazy-write", &c.lazyWrite) + err = config.UnmarshalKey("lazy-write", 
&fc.lazyWrite) if err != nil { log.Err("FileCache: config error [unable to obtain lazy-write]") - return fmt.Errorf("config error in %s [%s]", c.Name(), err.Error()) + return fmt.Errorf("config error in %s [%s]", fc.Name(), err.Error()) } - c.tmpPath = common.ExpandPath(conf.TmpPath) - if c.tmpPath == "" { + fc.tmpPath = common.ExpandPath(conf.TmpPath) + if fc.tmpPath == "" { log.Err("FileCache: config error [tmp-path not set]") - return fmt.Errorf("config error in %s error [tmp-path not set]", c.Name()) + return fmt.Errorf("config error in %s error [tmp-path not set]", fc.Name()) } - err = config.UnmarshalKey("mount-path", &c.mountPath) + err = config.UnmarshalKey("mount-path", &fc.mountPath) if err != nil { log.Err("FileCache: config error [unable to obtain Mount Path]") - return fmt.Errorf("config error in %s [%s]", c.Name(), err.Error()) + return fmt.Errorf("config error in %s [%s]", fc.Name(), err.Error()) } - if c.mountPath == c.tmpPath { + if fc.mountPath == fc.tmpPath { log.Err("FileCache: config error [tmp-path is same as mount path]") - return fmt.Errorf("config error in %s error [tmp-path is same as mount path]", c.Name()) + return fmt.Errorf("config error in %s error [tmp-path is same as mount path]", fc.Name()) } // Extract values from 'conf' and store them as you wish here - _, err = os.Stat(c.tmpPath) + _, err = os.Stat(fc.tmpPath) if os.IsNotExist(err) { log.Err("FileCache: config error [tmp-path does not exist. attempting to create tmp-path.]") - err := os.MkdirAll(c.tmpPath, os.FileMode(0755)) + err := os.MkdirAll(fc.tmpPath, os.FileMode(0755)) if err != nil { log.Err("FileCache: config error creating directory after clean [%s]", err.Error()) - return fmt.Errorf("config error in %s [%s]", c.Name(), err.Error()) + return fmt.Errorf("config error in %s [%s]", fc.Name(), err.Error()) } } var stat syscall.Statfs_t - err = syscall.Statfs(c.tmpPath, &stat) + err = syscall.Statfs(fc.tmpPath, &stat) if err != nil { - log.Err("FileCache::Configure : config error %s [%s]. Assigning a default value of 4GB or if any value is assigned to .disk-size-mb in config.", c.Name(), err.Error()) - c.maxCacheSize = 4192 + log.Err( + "FileCache::Configure : config error %s [%s]. 
Assigning a default value of 4GB or if any value is assigned to .disk-size-mb in config.", + fc.Name(), + err.Error(), + ) + fc.maxCacheSize = 4192 } else { - c.maxCacheSize = (0.8 * float64(stat.Bavail) * float64(stat.Bsize)) / (MB) + fc.maxCacheSize = (0.8 * float64(stat.Bavail) * float64(stat.Bsize)) / (MB) } if config.IsSet(compName+".max-size-mb") && conf.MaxSizeMB != 0 { - c.maxCacheSize = conf.MaxSizeMB + fc.maxCacheSize = conf.MaxSizeMB } - if !isLocalDirEmpty(c.tmpPath) && !c.allowNonEmpty { - log.Err("FileCache: config error %s directory is not empty", c.tmpPath) - return fmt.Errorf("config error in %s [%s]", c.Name(), "temp directory not empty") + if !isLocalDirEmpty(fc.tmpPath) && !fc.allowNonEmpty { + log.Err("FileCache: config error %s directory is not empty", fc.tmpPath) + return fmt.Errorf("config error in %s [%s]", fc.Name(), "temp directory not empty") } - err = config.UnmarshalKey("allow-other", &c.allowOther) + err = config.UnmarshalKey("allow-other", &fc.allowOther) if err != nil { log.Err("FileCache::Configure : config error [unable to obtain allow-other]") - return fmt.Errorf("config error in %s [%s]", c.Name(), err.Error()) + return fmt.Errorf("config error in %s [%s]", fc.Name(), err.Error()) } - if c.allowOther { - c.defaultPermission = common.DefaultAllowOtherPermissionBits + if fc.allowOther { + fc.defaultPermission = common.DefaultAllowOtherPermissionBits } else { - c.defaultPermission = common.DefaultFilePermissionBits + fc.defaultPermission = common.DefaultFilePermissionBits } - cacheConfig := c.GetPolicyConfig(conf) - c.policy = NewLRUPolicy(cacheConfig) + cacheConfig := fc.GetPolicyConfig(conf) + fc.policy = NewLRUPolicy(cacheConfig) - if c.policy == nil { + if fc.policy == nil { log.Err("FileCache::Configure : failed to create cache eviction policy") - return fmt.Errorf("config error in %s [%s]", c.Name(), "failed to create cache policy") + return fmt.Errorf("config error in %s [%s]", fc.Name(), "failed to create cache policy") } if config.IsSet(compName + ".background-download") { - log.Warn("unsupported v1 CLI parameter: background-download is not supported in blobfuse2. Consider using the streaming component.") + log.Warn( + "unsupported v1 CLI parameter: background-download is not supported in blobfuse2. Consider using the streaming component.", + ) } if config.IsSet(compName + ".cache-poll-timeout-msec") { - log.Warn("unsupported v1 CLI parameter: cache-poll-timeout-msec is not supported in blobfuse2. Polling occurs every timeout interval.") + log.Warn( + "unsupported v1 CLI parameter: cache-poll-timeout-msec is not supported in blobfuse2. 
Polling occurs every timeout interval.", + ) } if config.IsSet(compName + ".upload-modified-only") { log.Warn("unsupported v1 CLI parameter: upload-modified-only is always true in blobfuse2.") @@ -336,23 +344,44 @@ func (c *FileCache) Configure(_ bool) error { log.Warn("Sync will upload current contents of file.") } if config.IsSet(compName + ".hard-limit") { - c.hardLimit = conf.HardLimit - } - - c.diskHighWaterMark = 0 - if c.hardLimit && c.maxCacheSize != 0 { - c.diskHighWaterMark = (((c.maxCacheSize * MB) * float64(cacheConfig.highThreshold)) / 100) - } - - log.Crit("FileCache::Configure : create-empty %t, cache-timeout %d, tmp-path %s, max-size-mb %d, high-mark %d, low-mark %d, refresh-sec %v, max-eviction %v, hard-limit %v, policy %s, allow-non-empty-temp %t, cleanup-on-start %t, policy-trace %t, offload-io %t, sync-to-flush %t, ignore-sync %t, defaultPermission %v, diskHighWaterMark %v, maxCacheSize %v, mountPath %v", - c.createEmptyFile, int(c.cacheTimeout), c.tmpPath, int(c.maxCacheSize), int(cacheConfig.highThreshold), int(cacheConfig.lowThreshold), c.refreshSec, cacheConfig.maxEviction, c.hardLimit, conf.Policy, c.allowNonEmpty, conf.CleanupOnStart, c.policyTrace, c.offloadIO, c.syncToFlush, c.syncToDelete, c.defaultPermission, c.diskHighWaterMark, c.maxCacheSize, c.mountPath) + fc.hardLimit = conf.HardLimit + } + + fc.diskHighWaterMark = 0 + if fc.hardLimit && fc.maxCacheSize != 0 { + fc.diskHighWaterMark = (((fc.maxCacheSize * MB) * float64(cacheConfig.highThreshold)) / 100) + } + + log.Crit( + "FileCache::Configure : create-empty %t, cache-timeout %d, tmp-path %s, max-size-mb %d, high-mark %d, low-mark %d, refresh-sec %v, max-eviction %v, hard-limit %v, policy %s, allow-non-empty-temp %t, cleanup-on-start %t, policy-trace %t, offload-io %t, sync-to-flush %t, ignore-sync %t, defaultPermission %v, diskHighWaterMark %v, maxCacheSize %v, mountPath %v", + fc.createEmptyFile, + int(fc.cacheTimeout), + fc.tmpPath, + int(fc.maxCacheSize), + int(cacheConfig.highThreshold), + int(cacheConfig.lowThreshold), + fc.refreshSec, + cacheConfig.maxEviction, + fc.hardLimit, + conf.Policy, + fc.allowNonEmpty, + conf.CleanupOnStart, + fc.policyTrace, + fc.offloadIO, + fc.syncToFlush, + fc.syncToDelete, + fc.defaultPermission, + fc.diskHighWaterMark, + fc.maxCacheSize, + fc.mountPath, + ) return nil } // OnConfigChange : If component has registered, on config file change this method is called -func (c *FileCache) OnConfigChange() { - log.Trace("FileCache::OnConfigChange : %s", c.Name()) +func (fc *FileCache) OnConfigChange() { + log.Trace("FileCache::OnConfigChange : %s", fc.Name()) conf := FileCacheOptions{} err := config.UnmarshalKey(compName, &conf) @@ -360,27 +389,27 @@ func (c *FileCache) OnConfigChange() { log.Err("FileCache: config error [invalid config attributes]") } - c.createEmptyFile = conf.CreateEmptyFile - c.cacheTimeout = float64(conf.Timeout) - c.policyTrace = conf.EnablePolicyTrace - c.offloadIO = conf.OffloadIO - c.maxCacheSize = conf.MaxSizeMB - c.syncToFlush = conf.SyncToFlush - c.syncToDelete = !conf.SyncNoOp - _ = c.policy.UpdateConfig(c.GetPolicyConfig(conf)) + fc.createEmptyFile = conf.CreateEmptyFile + fc.cacheTimeout = float64(conf.Timeout) + fc.policyTrace = conf.EnablePolicyTrace + fc.offloadIO = conf.OffloadIO + fc.maxCacheSize = conf.MaxSizeMB + fc.syncToFlush = conf.SyncToFlush + fc.syncToDelete = !conf.SyncNoOp + _ = fc.policy.UpdateConfig(fc.GetPolicyConfig(conf)) } -func (c *FileCache) StatFs() (*syscall.Statfs_t, bool, error) { +func (fc *FileCache) StatFs() 
(*syscall.Statfs_t, bool, error) { // cache_size = f_blocks * f_frsize/1024 // cache_size - used = f_frsize * f_bavail/1024 // cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024 // if cache size is set to 0 then we have the root mount usage - maxCacheSize := c.maxCacheSize * MB + maxCacheSize := fc.maxCacheSize * MB if maxCacheSize == 0 { return nil, false, nil } - usage, _ := common.GetUsage(c.tmpPath) + usage, _ := common.GetUsage(fc.tmpPath) usage = usage * MB available := maxCacheSize - usage @@ -397,7 +426,7 @@ func (c *FileCache) StatFs() (*syscall.Statfs_t, bool, error) { return statfs, true, nil } -func (c *FileCache) GetPolicyConfig(conf FileCacheOptions) cachePolicyConfig { +func (fc *FileCache) GetPolicyConfig(conf FileCacheOptions) cachePolicyConfig { // A user provided value of 0 doesn't make sense for MaxEviction, HighThreshold or LowThreshold. if conf.MaxEviction == 0 { conf.MaxEviction = defaultMaxEviction @@ -410,13 +439,13 @@ func (c *FileCache) GetPolicyConfig(conf FileCacheOptions) cachePolicyConfig { } cacheConfig := cachePolicyConfig{ - tmpPath: c.tmpPath, + tmpPath: fc.tmpPath, maxEviction: conf.MaxEviction, highThreshold: float64(conf.HighThreshold), lowThreshold: float64(conf.LowThreshold), - cacheTimeout: uint32(c.cacheTimeout), - maxSizeMB: c.maxCacheSize, - fileLocks: c.fileLocks, + cacheTimeout: uint32(fc.cacheTimeout), + maxSizeMB: fc.maxCacheSize, + fileLocks: fc.fileLocks, policyTrace: conf.EnablePolicyTrace, } @@ -448,7 +477,11 @@ func (fc *FileCache) invalidateDirectory(name string) { // TODO : wouldn't this cause a race condition? a thread might get the lock before we purge - and the file would be non-existent err = filepath.WalkDir(localPath, func(path string, d fs.DirEntry, err error) error { if err == nil && d != nil { - log.Debug("FileCache::invalidateDirectory : %s (%d) getting removed from cache", path, d.IsDir()) + log.Debug( + "FileCache::invalidateDirectory : %s (%d) getting removed from cache", + path, + d.IsDir(), + ) if !d.IsDir() { fc.policy.CachePurge(path) } else { @@ -459,7 +492,11 @@ func (fc *FileCache) invalidateDirectory(name string) { }) if err != nil { - log.Debug("FileCache::invalidateDirectory : Failed to iterate directory %s [%s].", localPath, err.Error()) + log.Debug( + "FileCache::invalidateDirectory : Failed to iterate directory %s [%s].", + localPath, + err.Error(), + ) return } @@ -571,7 +608,9 @@ func (fc *FileCache) ReadDir(options internal.ReadDirOptions) ([]*internal.ObjAt } // StreamDir : Add local files to the list retrieved from storage container -func (fc *FileCache) StreamDir(options internal.StreamDirOptions) ([]*internal.ObjAttr, string, error) { +func (fc *FileCache) StreamDir( + options internal.StreamDirOptions, +) ([]*internal.ObjAttr, string, error) { attrs, token, err := fc.NextComponent().StreamDir(options) if token == "" { @@ -624,7 +663,11 @@ func (fc *FileCache) IsDirEmpty(options internal.IsDirEmptyOptions) bool { // return the result. 
cleanup, err := fc.deleteEmptyDirs(internal.DeleteDirOptions(options))
if err != nil {
- log.Debug("FileCache::IsDirEmpty : %s failed to delete empty directories [%s]", options.Name, err.Error())
+ log.Debug(
+ "FileCache::IsDirEmpty : %s failed to delete empty directories [%s]",
+ options.Name,
+ err.Error(),
+ )
return false
}
@@ -646,7 +689,11 @@ func (fc *FileCache) deleteEmptyDirs(options internal.DeleteDirOptions) (bool, e
return true, nil
}
- log.Debug("FileCache::DeleteEmptyDirs : Unable to read directory %s [%s]", localPath, err.Error())
+ log.Debug(
+ "FileCache::DeleteEmptyDirs : Unable to read directory %s [%s]",
+ localPath,
+ err.Error(),
+ )
return false, err
}
@@ -656,7 +703,11 @@ func (fc *FileCache) deleteEmptyDirs(options internal.DeleteDirOptions) (bool, e
Name: filepath.Join(localPath, entry.Name()),
})
if err != nil {
- log.Err("FileCache::deleteEmptyDirs : Unable to delete directory %s [%s]", localPath, err.Error())
+ log.Err(
+ "FileCache::deleteEmptyDirs : Unable to delete directory %s [%s]",
+ localPath,
+ err.Error(),
+ )
return val, err
}
} else {
@@ -718,14 +769,22 @@ func (fc *FileCache) CreateFile(options internal.CreateFileOptions) (*handlemap.
err := os.MkdirAll(filepath.Dir(localPath), fc.defaultPermission)
if err != nil {
- log.Err("FileCache::CreateFile : unable to create local directory %s [%s]", options.Name, err.Error())
+ log.Err(
+ "FileCache::CreateFile : unable to create local directory %s [%s]",
+ options.Name,
+ err.Error(),
+ )
return nil, err
}
// Open the file and grab a shared lock to prevent deletion by the cache policy.
f, err := os.OpenFile(localPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, options.Mode)
if err != nil {
- log.Err("FileCache::CreateFile : error opening local file %s [%s]", options.Name, err.Error())
+ log.Err(
+ "FileCache::CreateFile : error opening local file %s [%s]",
+ options.Name,
+ err.Error(),
+ )
return nil, err
}
// The user might change permissions WHILE creating the file therefore we need to account for that
@@ -759,7 +818,12 @@ func (fc *FileCache) CreateFile(options internal.CreateFileOptions) (*handlemap.
// err: the storage error
// method: the caller method name
// recoverable: whether or not case 2 is recoverable on flush/close of the file
-func (fc *FileCache) validateStorageError(path string, err error, method string, recoverable bool) error {
+func (fc *FileCache) validateStorageError(
+ path string,
+ err error,
+ method string,
+ recoverable bool,
+) error {
// For methods that take in file name, the goal is to update the path in storage and the local cache.
// See comments in GetAttr for the different situations we can run into. This specifically handles case 2.
if err != nil {
@@ -770,7 +834,9 @@ func (fc *FileCache) validateStorageError(path string, err error, method string,
// (policy might not think the file exists if the file is merely marked for eviction and not actually evicted yet)
localPath := filepath.Join(fc.tmpPath, path)
_, err := os.Stat(localPath)
- if os.IsNotExist(err) { // If the file is not in the local cache, then the file does not exist.
+ if os.IsNotExist(
+ err,
+ ) { // If the file is not in the local cache, then the file does not exist.
log.Err("FileCache::%s : %s does not exist in local cache", method, path) return syscall.ENOENT } else { @@ -808,7 +874,11 @@ func (fc *FileCache) DeleteFile(options internal.DeleteFileOptions) error { localPath := filepath.Join(fc.tmpPath, options.Name) err = deleteFile(localPath) if err != nil && !os.IsNotExist(err) { - log.Err("FileCache::DeleteFile : failed to delete local file %s [%s]", localPath, err.Error()) + log.Err( + "FileCache::DeleteFile : failed to delete local file %s [%s]", + localPath, + err.Error(), + ) } fc.policy.CachePurge(localPath) @@ -817,7 +887,11 @@ func (fc *FileCache) DeleteFile(options internal.DeleteFileOptions) error { } // isDownloadRequired: Whether or not the file needs to be downloaded to local cache. -func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock *common.LockMapItem) (bool, bool, *internal.ObjAttr, error) { +func (fc *FileCache) isDownloadRequired( + localPath string, + blobPath string, + flock *common.LockMapItem, +) (bool, bool, *internal.ObjAttr, error) { fileExists := false downloadRequired := false lmt := time.Time{} @@ -862,7 +936,10 @@ func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock if fileExists && flock.Count() > 0 { // file exists in local cache and there is already an handle open for it // In this case we can not redownload the file from container - log.Info("FileCache::isDownloadRequired : Need to re-download %s, but skipping as handle is already open", blobPath) + log.Info( + "FileCache::isDownloadRequired : Need to re-download %s, but skipping as handle is already open", + blobPath, + ) downloadRequired = false } @@ -872,7 +949,11 @@ func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock (fc.refreshSec != 0 && time.Since(flock.DownloadTime()).Seconds() > float64(fc.refreshSec)) { attr, err = fc.NextComponent().GetAttr(internal.GetAttrOptions{Name: blobPath}) if err != nil { - log.Err("FileCache::isDownloadRequired : Failed to get attr of %s [%s]", blobPath, err.Error()) + log.Err( + "FileCache::isDownloadRequired : Failed to get attr of %s [%s]", + blobPath, + err.Error(), + ) } } @@ -883,8 +964,14 @@ func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock // If time matches but size does not then still we need to redownlaod the file. if attr.Mtime.After(lmt) || stat.Size != attr.Size { // File has not been modified at storage yet so no point in redownloading the file - log.Info("FileCache::isDownloadRequired : File is modified in container, so forcing redownload %s [A-%v : L-%v] [A-%v : L-%v]", - blobPath, attr.Mtime, lmt, attr.Size, stat.Size) + log.Info( + "FileCache::isDownloadRequired : File is modified in container, so forcing redownload %s [A-%v : L-%v] [A-%v : L-%v]", + blobPath, + attr.Mtime, + lmt, + attr.Size, + stat.Size, + ) downloadRequired = true // As we have decided to continue using old file, we reset the timer to check again after refresh time interval @@ -899,7 +986,12 @@ func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock // OpenFile: Makes the file available in the local cache for further file operations. 
func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Handle, error) { - log.Trace("FileCache::OpenFile : name=%s, flags=%d, mode=%s", options.Name, options.Flags, options.Mode) + log.Trace( + "FileCache::OpenFile : name=%s, flags=%d, mode=%s", + options.Name, + options.Flags, + options.Mode, + ) localPath := filepath.Join(fc.tmpPath, options.Name) var f *os.File @@ -944,7 +1036,11 @@ func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Hand // Open the file in write mode. f, err = os.OpenFile(localPath, os.O_CREATE|os.O_RDWR, options.Mode) if err != nil { - log.Err("FileCache::OpenFile : error creating new file %s [%s]", options.Name, err.Error()) + log.Err( + "FileCache::OpenFile : error creating new file %s [%s]", + options.Name, + err.Error(), + ) return nil, err } @@ -956,7 +1052,10 @@ func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Hand if fc.diskHighWaterMark != 0 { currSize, err := common.GetUsage(fc.tmpPath) if err != nil { - log.Err("FileCache::OpenFile : error getting current usage of cache [%s]", err.Error()) + log.Err( + "FileCache::OpenFile : error getting current usage of cache [%s]", + err.Error(), + ) } else { if (currSize + float64(fileSize)) > fc.diskHighWaterMark { log.Err("FileCache::OpenFile : cache size limit reached [%f] failed to open %s", currSize, options.Name) @@ -980,11 +1079,19 @@ func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Hand }) if err != nil { // File was created locally and now download has failed so we need to delete it back from local cache - log.Err("FileCache::OpenFile : error downloading file from storage %s [%s]", options.Name, err.Error()) + log.Err( + "FileCache::OpenFile : error downloading file from storage %s [%s]", + options.Name, + err.Error(), + ) _ = f.Close() err = os.Remove(localPath) if err != nil { - log.Err("FileCache::OpenFile : Failed to remove file %s [%s]", localPath, err.Error()) + log.Err( + "FileCache::OpenFile : Failed to remove file %s [%s]", + localPath, + err.Error(), + ) } return nil, err } @@ -1005,7 +1112,11 @@ func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Hand // If user has selected some non default mode in config then every local file shall be created with that mode only err = os.Chmod(localPath, fileMode) if err != nil { - log.Err("FileCache::OpenFile : Failed to change mode of file %s [%s]", options.Name, err.Error()) + log.Err( + "FileCache::OpenFile : Failed to change mode of file %s [%s]", + options.Name, + err.Error(), + ) } // TODO: When chown is supported should we update that? @@ -1013,7 +1124,11 @@ func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Hand // chtimes shall be the last api otherwise calling chmod/chown will update the last change time err = os.Chtimes(localPath, attr.Atime, attr.Mtime) if err != nil { - log.Err("FileCache::OpenFile : Failed to change times of file %s [%s]", options.Name, err.Error()) + log.Err( + "FileCache::OpenFile : Failed to change times of file %s [%s]", + options.Name, + err.Error(), + ) } } @@ -1026,7 +1141,11 @@ func (fc *FileCache) OpenFile(options internal.OpenFileOptions) (*handlemap.Hand // Open the file and grab a shared lock to prevent deletion by the cache policy. 
f, err = os.OpenFile(localPath, options.Flags, options.Mode)
if err != nil {
- log.Err("FileCache::OpenFile : error opening cached file %s [%s]", options.Name, err.Error())
+ log.Err(
+ "FileCache::OpenFile : error opening cached file %s [%s]",
+ options.Name,
+ err.Error(),
+ )
return nil, err
}
@@ -1072,8 +1191,15 @@ func (fc *FileCache) CloseFile(options internal.CloseFileOptions) error {
}
// closeFileInternal: Actual handling of the close file goes here
-func (fc *FileCache) closeFileInternal(options internal.CloseFileOptions, flock *common.LockMapItem) error {
- log.Trace("FileCache::closeFileInternal : name=%s, handle=%d", options.Handle.Path, options.Handle.ID)
+func (fc *FileCache) closeFileInternal(
+ options internal.CloseFileOptions,
+ flock *common.LockMapItem,
+) error {
+ log.Trace(
+ "FileCache::closeFileInternal : name=%s, handle=%d",
+ options.Handle.Path,
+ options.Handle.ID,
+ )
// Lock is acquired by CloseFile, at end of this method we need to unlock
// If it's an async call, the file shall be locked till the upload completes.
@@ -1082,7 +1208,9 @@ func (fc *FileCache) closeFileInternal(options internal.CloseFileOptions, flock
localPath := filepath.Join(fc.tmpPath, options.Handle.Path)
- err := fc.FlushFile(internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true}) //nolint
+ err := fc.FlushFile(
+ internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true},
+ ) //nolint
if err != nil {
log.Err("FileCache::closeFileInternal : failed to flush file %s", options.Handle.Path)
return err
@@ -1090,13 +1218,21 @@ func (fc *FileCache) closeFileInternal(options internal.CloseFileOptions, flock
f := options.Handle.GetFileObject()
if f == nil {
- log.Err("FileCache::closeFileInternal : error [missing fd in handle object] %s", options.Handle.Path)
+ log.Err(
+ "FileCache::closeFileInternal : error [missing fd in handle object] %s",
+ options.Handle.Path,
+ )
return syscall.EBADF
}
err = f.Close()
if err != nil {
- log.Err("FileCache::closeFileInternal : error closing file %s(%d) [%s]", options.Handle.Path, int(f.Fd()), err.Error())
+ log.Err(
+ "FileCache::closeFileInternal : error closing file %s(%d) [%s]",
+ options.Handle.Path,
+ int(f.Fd()),
+ err.Error(),
+ )
return err
}
flock.Dec()
@@ -1108,7 +1244,11 @@ func (fc *FileCache) closeFileInternal(options internal.CloseFileOptions, flock
err = deleteFile(localPath)
if err != nil && !os.IsNotExist(err) {
- log.Err("FileCache::closeFileInternal : failed to delete local file %s [%s]", localPath, err.Error())
+ log.Err(
+ "FileCache::closeFileInternal : failed to delete local file %s [%s]",
+ localPath,
+ err.Error(),
+ )
}
fc.policy.CachePurge(localPath)
@@ -1156,7 +1296,10 @@ func (fc *FileCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, e
f := options.Handle.GetFileObject()
if f == nil {
- log.Err("FileCache::ReadInBuffer : error [couldn't find fd in handle] %s", options.Handle.Path)
+ log.Err(
+ "FileCache::ReadInBuffer : error [couldn't find fd in handle] %s",
+ options.Handle.Path,
+ )
return 0, syscall.EBADF
}
@@ -1222,7 +1365,9 @@ func (fc *FileCache) WriteFile(options *internal.WriteFileOptions) (int, error)
func (fc *FileCache) SyncFile(options internal.SyncFileOptions) error {
log.Trace("FileCache::SyncFile : handle=%d, path=%s", options.Handle.ID, options.Handle.Path)
if fc.syncToFlush {
- err := fc.FlushFile(internal.FlushFileOptions{Handle: options.Handle, CloseInProgress: true}) //nolint
+ err := fc.FlushFile(
+ internal.FlushFileOptions{Handle: options.Handle, CloseInProgress:
true}, + ) //nolint if err != nil { log.Err("FileCache::SyncFile : failed to flush file %s", options.Handle.Path) return err @@ -1268,13 +1413,20 @@ func (fc *FileCache) FlushFile(options internal.FlushFileOptions) error { if options.Handle.Dirty() { if fc.lazyWrite && !options.CloseInProgress { // As lazy-write is enable, upload will be scheduled when file is closed. - log.Info("FileCache::FlushFile : %s will be flushed when handle %d is closed", options.Handle.Path, options.Handle.ID) + log.Info( + "FileCache::FlushFile : %s will be flushed when handle %d is closed", + options.Handle.Path, + options.Handle.ID, + ) return nil } f := options.Handle.GetFileObject() if f == nil { - log.Err("FileCache::FlushFile : error [couldn't find fd in handle] %s", options.Handle.Path) + log.Err( + "FileCache::FlushFile : error [couldn't find fd in handle] %s", + options.Handle.Path, + ) return syscall.EBADF } @@ -1284,13 +1436,19 @@ func (fc *FileCache) FlushFile(options internal.FlushFileOptions) error { // f.fsync() is another option but dup+close does it quickly compared to sync dupFd, err := syscall.Dup(int(f.Fd())) if err != nil { - log.Err("FileCache::FlushFile : error [couldn't duplicate the fd] %s", options.Handle.Path) + log.Err( + "FileCache::FlushFile : error [couldn't duplicate the fd] %s", + options.Handle.Path, + ) return syscall.EIO } err = syscall.Close(dupFd) if err != nil { - log.Err("FileCache::FlushFile : error [unable to close duplicate fd] %s", options.Handle.Path) + log.Err( + "FileCache::FlushFile : error [unable to close duplicate fd] %s", + options.Handle.Path, + ) return syscall.EIO } @@ -1310,12 +1468,19 @@ func (fc *FileCache) FlushFile(options internal.FlushFileOptions) error { if err == nil { modeChanged = true uploadHandle, err = os.Open(localPath) - log.Info("FileCache::FlushFile : read mode added to file %s", options.Handle.Path) + log.Info( + "FileCache::FlushFile : read mode added to file %s", + options.Handle.Path, + ) } } if err != nil { - log.Err("FileCache::FlushFile : error [unable to open upload handle] %s [%s]", options.Handle.Path, err.Error()) + log.Err( + "FileCache::FlushFile : error [unable to open upload handle] %s [%s]", + options.Handle.Path, + err.Error(), + ) return err } } @@ -1330,12 +1495,20 @@ func (fc *FileCache) FlushFile(options internal.FlushFileOptions) error { if modeChanged { err1 := os.Chmod(localPath, orgMode) if err1 != nil { - log.Err("FileCache::FlushFile : Failed to remove read mode from file %s [%s]", options.Handle.Path, err1.Error()) + log.Err( + "FileCache::FlushFile : Failed to remove read mode from file %s [%s]", + options.Handle.Path, + err1.Error(), + ) } } if err != nil { - log.Err("FileCache::FlushFile : %s upload failed [%s]", options.Handle.Path, err.Error()) + log.Err( + "FileCache::FlushFile : %s upload failed [%s]", + options.Handle.Path, + err.Error(), + ) return err } @@ -1358,7 +1531,11 @@ func (fc *FileCache) FlushFile(options internal.FlushFileOptions) error { if err != nil { // chmod was missed earlier for this file and doing it now also // resulted in error so ignore this one and proceed for flush handling - log.Err("FileCache::FlushFile : %s chmod failed [%s]", options.Handle.Path, err.Error()) + log.Err( + "FileCache::FlushFile : %s chmod failed [%s]", + options.Handle.Path, + err.Error(), + ) } } } @@ -1453,7 +1630,11 @@ func (fc *FileCache) RenameFile(options internal.RenameFileOptions) error { // stale content). We either need to remove dest file as well from cache or just run rename to replace the content. 
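// The comment above is the key design note for this hunk: renaming the cached
// source file over the destination keeps the cache warm, and if that fails,
// both local copies are evicted so the next open pulls fresh content from the
// container. A compact sketch of that fallback, assuming the file's existing
// "os" import (renameOrEvict and the purge callback are hypothetical; the
// real code also logs failures and calls fc.policy.CachePurge):
func renameOrEvict(localSrcPath, localDstPath string, purge func(string)) {
	if err := os.Rename(localSrcPath, localDstPath); err == nil {
		return // cached copy moved into place, nothing stale remains
	}
	// Rename failed: drop both local copies and let storage be authoritative.
	if err := os.Remove(localDstPath); err != nil && !os.IsNotExist(err) {
		// best-effort cleanup; the component only logs this case
	}
	purge(localDstPath)
	if err := os.Remove(localSrcPath); err != nil && !os.IsNotExist(err) {
		// best-effort cleanup; the component only logs this case
	}
	purge(localSrcPath)
}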
 	err = os.Rename(localSrcPath, localDstPath)
 	if err != nil && !os.IsNotExist(err) {
-		log.Err("FileCache::RenameFile : %s failed to rename local file %s [%s]", localSrcPath, err.Error())
+		log.Err(
+			"FileCache::RenameFile : failed to rename local file %s [%s]",
+			localSrcPath,
+			err.Error(),
+		)
 	}
 
 	if err != nil {
@@ -1462,7 +1643,11 @@ func (fc *FileCache) RenameFile(options internal.RenameFileOptions) error {
 		// so deleting local dest file ensures next open of that will get the updated file from container
 		err = deleteFile(localDstPath)
 		if err != nil && !os.IsNotExist(err) {
-			log.Err("FileCache::RenameFile : %s failed to delete local file %s [%s]", localDstPath, err.Error())
+			log.Err(
+				"FileCache::RenameFile : failed to delete local file %s [%s]",
+				localDstPath,
+				err.Error(),
+			)
 		}
 
 		fc.policy.CachePurge(localDstPath)
@@ -1470,7 +1655,11 @@
 		err = deleteFile(localSrcPath)
 		if err != nil && !os.IsNotExist(err) {
-			log.Err("FileCache::RenameFile : %s failed to delete local file %s [%s]", localSrcPath, err.Error())
+			log.Err(
+				"FileCache::RenameFile : failed to delete local file %s [%s]",
+				localSrcPath,
+				err.Error(),
+			)
 		}
 
 		fc.policy.CachePurge(localSrcPath)
@@ -1493,7 +1682,10 @@ func (fc *FileCache) TruncateFile(options internal.TruncateFileOptions) error {
 	if fc.diskHighWaterMark != 0 {
 		currSize, err := common.GetUsage(fc.tmpPath)
 		if err != nil {
-			log.Err("FileCache::TruncateFile : error getting current usage of cache [%s]", err.Error())
+			log.Err(
+				"FileCache::TruncateFile : error getting current usage of cache [%s]",
+				err.Error(),
+			)
 		} else {
 			if (currSize + float64(options.Size)) > fc.diskHighWaterMark {
 				log.Err("FileCache::TruncateFile : cache size limit reached [%f] failed to open %s", fc.maxCacheSize, options.Name)
@@ -1522,7 +1714,11 @@ func (fc *FileCache) TruncateFile(options internal.TruncateFileOptions) error {
 	if info.Size() != options.Size {
 		err = os.Truncate(localPath, options.Size)
 		if err != nil {
-			log.Err("FileCache::TruncateFile : error truncating cached file %s [%s]", localPath, err.Error())
+			log.Err(
+				"FileCache::TruncateFile : error truncating cached file %s [%s]",
+				localPath,
+				err.Error(),
+			)
 			return err
 		}
 	}
@@ -1556,7 +1752,11 @@ func (fc *FileCache) Chmod(options internal.ChmodOptions) error {
 	if info.Mode() != options.Mode {
 		err = os.Chmod(localPath, options.Mode)
 		if err != nil {
-			log.Err("FileCache::Chmod : error changing mode on the cached path %s [%s]", localPath, err.Error())
+			log.Err(
+				"FileCache::Chmod : error changing mode on the cached path %s [%s]",
+				localPath,
+				err.Error(),
+			)
 			return err
 		}
 	}
@@ -1585,7 +1785,11 @@ func (fc *FileCache) Chown(options internal.ChownOptions) error {
 	err = os.Chown(localPath, options.Owner, options.Group)
 	if err != nil {
-		log.Err("FileCache::Chown : error changing owner on the cached path %s [%s]", localPath, err.Error())
+		log.Err(
+			"FileCache::Chown : error changing owner on the cached path %s [%s]",
+			localPath,
+			err.Error(),
+		)
 		return err
 	}
 }
@@ -1617,42 +1821,86 @@ func NewFileCacheComponent() internal.Component {
 func init() {
 	internal.AddComponent(compName, NewFileCacheComponent)
 
-	tmpPathFlag := config.AddStringFlag("tmp-path", "", "configures the tmp location for the cache. Configure the fastest disk (SSD or ramdisk) for best performance.")
+	tmpPathFlag := config.AddStringFlag(
+		"tmp-path",
+		"",
+		"configures the tmp location for the cache. 
Configure the fastest disk (SSD or ramdisk) for best performance.", + ) config.BindPFlag(compName+".path", tmpPathFlag) - fileCacheTimeout := config.AddUint32Flag("file-cache-timeout", defaultFileCacheTimeout, "file cache timeout") + fileCacheTimeout := config.AddUint32Flag( + "file-cache-timeout", + defaultFileCacheTimeout, + "file cache timeout", + ) config.BindPFlag(compName+".timeout-sec", fileCacheTimeout) - fileCacheTimeoutSec := config.AddUint32Flag("file-cache-timeout-in-seconds", defaultFileCacheTimeout, "file cache timeout") + fileCacheTimeoutSec := config.AddUint32Flag( + "file-cache-timeout-in-seconds", + defaultFileCacheTimeout, + "file cache timeout", + ) config.BindPFlag(compName+".file-cache-timeout-in-seconds", fileCacheTimeoutSec) fileCacheTimeoutSec.Hidden = true - cacheSizeMB := config.AddUint32Flag("cache-size-mb", 0, "max size in MB that file-cache can occupy on local disk for caching") + cacheSizeMB := config.AddUint32Flag( + "cache-size-mb", + 0, + "max size in MB that file-cache can occupy on local disk for caching", + ) config.BindPFlag(compName+".max-size-mb", cacheSizeMB) - highThreshold := config.AddUint32Flag("high-disk-threshold", 90, "percentage of cache utilization which kicks in early eviction") + highThreshold := config.AddUint32Flag( + "high-disk-threshold", + 90, + "percentage of cache utilization which kicks in early eviction", + ) config.BindPFlag(compName+".high-threshold", highThreshold) - lowThreshold := config.AddUint32Flag("low-disk-threshold", 80, "percentage of cache utilization which stops early eviction started by high-disk-threshold") + lowThreshold := config.AddUint32Flag( + "low-disk-threshold", + 80, + "percentage of cache utilization which stops early eviction started by high-disk-threshold", + ) config.BindPFlag(compName+".low-threshold", lowThreshold) - maxEviction := config.AddUint32Flag("max-eviction", 0, "Number of files to be evicted from cache at once.") + maxEviction := config.AddUint32Flag( + "max-eviction", + 0, + "Number of files to be evicted from cache at once.", + ) config.BindPFlag(compName+".max-eviction", maxEviction) maxEviction.Hidden = true - emptyDirCheck := config.AddBoolFlag("empty-dir-check", false, "Disallows remounting using a non-empty tmp-path.") + emptyDirCheck := config.AddBoolFlag( + "empty-dir-check", + false, + "Disallows remounting using a non-empty tmp-path.", + ) config.BindPFlag(compName+".empty-dir-check", emptyDirCheck) emptyDirCheck.Hidden = true - backgroundDownload := config.AddBoolFlag("background-download", false, "File download to run in the background on open call.") + backgroundDownload := config.AddBoolFlag( + "background-download", + false, + "File download to run in the background on open call.", + ) config.BindPFlag(compName+".background-download", backgroundDownload) backgroundDownload.Hidden = true - cachePollTimeout := config.AddUint64Flag("cache-poll-timeout-msec", 0, "Time in milliseconds in order to poll for possible expired files awaiting cache eviction.") + cachePollTimeout := config.AddUint64Flag( + "cache-poll-timeout-msec", + 0, + "Time in milliseconds in order to poll for possible expired files awaiting cache eviction.", + ) config.BindPFlag(compName+".cache-poll-timeout-msec", cachePollTimeout) cachePollTimeout.Hidden = true - uploadModifiedOnly := config.AddBoolFlag("upload-modified-only", false, "Flag to turn off unnecessary uploads to storage.") + uploadModifiedOnly := config.AddBoolFlag( + "upload-modified-only", + false, + "Flag to turn off unnecessary uploads to 
storage.", + ) config.BindPFlag(compName+".upload-modified-only", uploadModifiedOnly) uploadModifiedOnly.Hidden = true @@ -1660,16 +1908,31 @@ func init() { config.BindPFlag(compName+".policy", cachePolicy) cachePolicy.Hidden = true - syncToFlush := config.AddBoolFlag("sync-to-flush", false, "Sync call on file will force a upload of the file.") + syncToFlush := config.AddBoolFlag( + "sync-to-flush", + false, + "Sync call on file will force a upload of the file.", + ) config.BindPFlag(compName+".sync-to-flush", syncToFlush) - ignoreSync := config.AddBoolFlag("ignore-sync", false, "Just ignore sync call and do not invalidate locally cached file.") + ignoreSync := config.AddBoolFlag( + "ignore-sync", + false, + "Just ignore sync call and do not invalidate locally cached file.", + ) config.BindPFlag(compName+".ignore-sync", ignoreSync) - hardLimit := config.AddBoolFlag("hard-limit", false, "File cache limits are hard limits or not.") + hardLimit := config.AddBoolFlag( + "hard-limit", + false, + "File cache limits are hard limits or not.", + ) config.BindPFlag(compName+".hard-limit", hardLimit) - config.RegisterFlagCompletionFunc("tmp-path", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveDefault - }) + config.RegisterFlagCompletionFunc( + "tmp-path", + func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveDefault + }, + ) } diff --git a/component/file_cache/lru_policy.go b/component/file_cache/lru_policy.go index a3f1a850d4..c1035155e9 100644 --- a/component/file_cache/lru_policy.go +++ b/component/file_cache/lru_policy.go @@ -139,7 +139,9 @@ func (p *lruPolicy) StartPolicy() error { log.Info("lruPolicy::StartPolicy : Policy set with %v timeout", p.cacheTimeout) if p.cacheTimeout != 0 { - p.cacheTimeoutMonitor = time.Tick(time.Duration(time.Duration(p.cacheTimeout) * time.Second)) + p.cacheTimeoutMonitor = time.Tick( + time.Duration(time.Duration(p.cacheTimeout) * time.Second), + ) } go p.clearCache() @@ -297,7 +299,11 @@ func (p *lruPolicy) clearCache() { if pUsage > p.highThreshold { continueDeletion := true for continueDeletion { - log.Info("lruPolicy::ClearCache : High threshold reached %f > %f", pUsage, p.highThreshold) + log.Info( + "lruPolicy::ClearCache : High threshold reached %f > %f", + pUsage, + p.highThreshold, + ) cleanupCount++ p.updateMarker() @@ -306,7 +312,11 @@ func (p *lruPolicy) clearCache() { pUsage := getUsagePercentage(p.tmpPath, p.maxSizeMB) if pUsage < p.lowThreshold || cleanupCount >= 3 { - log.Info("lruPolicy::ClearCache : Threshold stabilized %f > %f", pUsage, p.lowThreshold) + log.Info( + "lruPolicy::ClearCache : Threshold stabilized %f > %f", + pUsage, + p.lowThreshold, + ) continueDeletion = false } } @@ -428,7 +438,11 @@ func (p *lruPolicy) deleteItem(name string) { azPath := strings.TrimPrefix(name, p.tmpPath) if azPath == "" { - log.Err("lruPolicy::DeleteItem : Empty file name formed name : %s, tmpPath : %s", name, p.tmpPath) + log.Err( + "lruPolicy::DeleteItem : Empty file name formed name : %s, tmpPath : %s", + name, + p.tmpPath, + ) return } @@ -471,7 +485,7 @@ func (p *lruPolicy) printNodes() { node := p.head - var count int = 0 + var count = 0 log.Debug("lruPolicy::printNodes : Starts") for ; node != nil; node = node.next { diff --git a/component/libfuse/libfuse.go b/component/libfuse/libfuse.go index 2f89a3868d..c530173920 100644 --- a/component/libfuse/libfuse.go +++ 
b/component/libfuse/libfuse.go @@ -90,23 +90,23 @@ type dirChildCache struct { // Structure defining your config parameters type LibfuseOptions struct { mountPath string - DefaultPermission uint32 `config:"default-permission" yaml:"default-permission,omitempty"` - AttributeExpiration uint32 `config:"attribute-expiration-sec" yaml:"attribute-expiration-sec,omitempty"` - EntryExpiration uint32 `config:"entry-expiration-sec" yaml:"entry-expiration-sec,omitempty"` + DefaultPermission uint32 `config:"default-permission" yaml:"default-permission,omitempty"` + AttributeExpiration uint32 `config:"attribute-expiration-sec" yaml:"attribute-expiration-sec,omitempty"` + EntryExpiration uint32 `config:"entry-expiration-sec" yaml:"entry-expiration-sec,omitempty"` NegativeEntryExpiration uint32 `config:"negative-entry-expiration-sec" yaml:"negative-entry-expiration-sec,omitempty"` - EnableFuseTrace bool `config:"fuse-trace" yaml:"fuse-trace,omitempty"` - allowOther bool `config:"allow-other" yaml:"-"` - allowRoot bool `config:"allow-root" yaml:"-"` - readOnly bool `config:"read-only" yaml:"-"` - ExtensionPath string `config:"extension" yaml:"extension,omitempty"` - DisableWritebackCache bool `config:"disable-writeback-cache" yaml:"-"` - IgnoreOpenFlags bool `config:"ignore-open-flags" yaml:"ignore-open-flags,omitempty"` - nonEmptyMount bool `config:"nonempty" yaml:"nonempty,omitempty"` - Uid uint32 `config:"uid" yaml:"uid,omitempty"` - Gid uint32 `config:"gid" yaml:"gid,omitempty"` - MaxFuseThreads uint32 `config:"max-fuse-threads" yaml:"max-fuse-threads,omitempty"` - DirectIO bool `config:"direct-io" yaml:"direct-io,omitempty"` - Umask uint32 `config:"umask" yaml:"umask,omitempty"` + EnableFuseTrace bool `config:"fuse-trace" yaml:"fuse-trace,omitempty"` + allowOther bool `config:"allow-other" yaml:"-"` + allowRoot bool `config:"allow-root" yaml:"-"` + readOnly bool `config:"read-only" yaml:"-"` + ExtensionPath string `config:"extension" yaml:"extension,omitempty"` + DisableWritebackCache bool `config:"disable-writeback-cache" yaml:"-"` + IgnoreOpenFlags bool `config:"ignore-open-flags" yaml:"ignore-open-flags,omitempty"` + nonEmptyMount bool `config:"nonempty" yaml:"nonempty,omitempty"` + Uid uint32 `config:"uid" yaml:"uid,omitempty"` + Gid uint32 `config:"gid" yaml:"gid,omitempty"` + MaxFuseThreads uint32 `config:"max-fuse-threads" yaml:"max-fuse-threads,omitempty"` + DirectIO bool `config:"direct-io" yaml:"direct-io,omitempty"` + Umask uint32 `config:"umask" yaml:"umask,omitempty"` } const compName = "libfuse" @@ -215,19 +215,22 @@ func (lf *Libfuse) Validate(opt *LibfuseOptions) error { } } - if config.IsSet(compName+".entry-expiration-sec") || config.IsSet("lfuse.entry-expiration-sec") { + if config.IsSet(compName+".entry-expiration-sec") || + config.IsSet("lfuse.entry-expiration-sec") { lf.entryExpiration = opt.EntryExpiration } else { lf.entryExpiration = defaultEntryExpiration } - if config.IsSet(compName+".attribute-expiration-sec") || config.IsSet("lfuse.attribute-expiration-sec") { + if config.IsSet(compName+".attribute-expiration-sec") || + config.IsSet("lfuse.attribute-expiration-sec") { lf.attributeExpiration = opt.AttributeExpiration } else { lf.attributeExpiration = defaultAttrExpiration } - if config.IsSet(compName+".negative-entry-expiration-sec") || config.IsSet("lfuse.negative-entry-expiration-sec") { + if config.IsSet(compName+".negative-entry-expiration-sec") || + config.IsSet("lfuse.negative-entry-expiration-sec") { lf.negativeTimeout = opt.NegativeEntryExpiration } else { 
lf.negativeTimeout = defaultNegativeEntryExpiration @@ -240,8 +243,9 @@ func (lf *Libfuse) Validate(opt *LibfuseOptions) error { log.Crit("Libfuse::Validate : DirectIO enabled, setting fuse timeouts to 0") } - if !(config.IsSet(compName+".uid") || config.IsSet(compName+".gid") || - config.IsSet("lfuse.uid") || config.IsSet("lfuse.gid")) { + if !config.IsSet(compName+".uid") && !config.IsSet(compName+".gid") && + !config.IsSet("lfuse.uid") && + !config.IsSet("lfuse.gid") { var err error lf.ownerUID, lf.ownerGID, err = common.GetCurrentUser() if err != nil { @@ -349,8 +353,27 @@ func (lf *Libfuse) Configure(_ bool) error { } } - log.Crit("Libfuse::Configure : read-only %t, allow-other %t, allow-root %t, default-perm %d, entry-timeout %d, attr-time %d, negative-timeout %d, ignore-open-flags %t, nonempty %t, direct_io %t, max-fuse-threads %d, fuse-trace %t, extension %s, disable-writeback-cache %t, dirPermission %v, mountPath %v, umask %v, disableKernelCache %v", - lf.readOnly, lf.allowOther, lf.allowRoot, lf.filePermission, lf.entryExpiration, lf.attributeExpiration, lf.negativeTimeout, lf.ignoreOpenFlags, lf.nonEmptyMount, lf.directIO, lf.maxFuseThreads, lf.traceEnable, lf.extensionPath, lf.disableWritebackCache, lf.dirPermission, lf.mountPath, lf.umask, lf.disableKernelCache) + log.Crit( + "Libfuse::Configure : read-only %t, allow-other %t, allow-root %t, default-perm %d, entry-timeout %d, attr-time %d, negative-timeout %d, ignore-open-flags %t, nonempty %t, direct_io %t, max-fuse-threads %d, fuse-trace %t, extension %s, disable-writeback-cache %t, dirPermission %v, mountPath %v, umask %v, disableKernelCache %v", + lf.readOnly, + lf.allowOther, + lf.allowRoot, + lf.filePermission, + lf.entryExpiration, + lf.attributeExpiration, + lf.negativeTimeout, + lf.ignoreOpenFlags, + lf.nonEmptyMount, + lf.directIO, + lf.maxFuseThreads, + lf.traceEnable, + lf.extensionPath, + lf.disableWritebackCache, + lf.dirPermission, + lf.mountPath, + lf.umask, + lf.disableKernelCache, + ) return nil } @@ -375,19 +398,35 @@ func init() { entryTimeoutFlag := config.AddUint32Flag("entry-timeout", 0, "The entry timeout in seconds.") config.BindPFlag(compName+".entry-expiration-sec", entryTimeoutFlag) - negativeTimeoutFlag := config.AddUint32Flag("negative-timeout", 0, "The negative entry timeout in seconds.") + negativeTimeoutFlag := config.AddUint32Flag( + "negative-timeout", + 0, + "The negative entry timeout in seconds.", + ) config.BindPFlag(compName+".negative-entry-expiration-sec", negativeTimeoutFlag) - allowOther := config.AddBoolFlag("allow-other", false, "Allow other users to access this mount point.") + allowOther := config.AddBoolFlag( + "allow-other", + false, + "Allow other users to access this mount point.", + ) config.BindPFlag("allow-other", allowOther) - disableWritebackCache := config.AddBoolFlag("disable-writeback-cache", false, "Disallow libfuse to buffer write requests if you must strictly open files in O_WRONLY or O_APPEND mode.") + disableWritebackCache := config.AddBoolFlag( + "disable-writeback-cache", + false, + "Disallow libfuse to buffer write requests if you must strictly open files in O_WRONLY or O_APPEND mode.", + ) config.BindPFlag(compName+".disable-writeback-cache", disableWritebackCache) debug := config.AddBoolPFlag("d", false, "Mount with foreground and FUSE logs on.") config.BindPFlag(compName+".fuse-trace", debug) debug.Hidden = true - ignoreOpenFlags := config.AddBoolFlag("ignore-open-flags", true, "Ignore unsupported open flags (APPEND, WRONLY) by blobfuse when writeback 
caching is enabled.") + ignoreOpenFlags := config.AddBoolFlag( + "ignore-open-flags", + true, + "Ignore unsupported open flags (APPEND, WRONLY) by blobfuse when writeback caching is enabled.", + ) config.BindPFlag(compName+".ignore-open-flags", ignoreOpenFlags) } diff --git a/component/libfuse/libfuse2_handler.go b/component/libfuse/libfuse2_handler.go index 1ffd8cfaad..553f291a8c 100644 --- a/component/libfuse/libfuse2_handler.go +++ b/component/libfuse/libfuse2_handler.go @@ -141,7 +141,10 @@ func (lf *Libfuse) initFuse() error { errc = C.get_extension_callbacks(&operations) if errc != 0 { C.unload_library() - log.Err("Libfuse::InitFuse : Failed to get callback table from extension. error code %d", errc) + log.Err( + "Libfuse::InitFuse : Failed to get callback table from extension. error code %d", + errc, + ) return errors.New("failed to get callback table from extension") } log.Trace("Libfuse::InitFuse : Extension callback retrieved") @@ -154,7 +157,10 @@ func (lf *Libfuse) initFuse() error { errc = C.register_callback_to_extension(&my_operations) if errc != 0 { C.unload_library() - log.Err("Libfuse::InitFuse : Failed to register callback table to extension. error code %d", errc) + log.Err( + "Libfuse::InitFuse : Failed to register callback table to extension. error code %d", + errc, + ) return errors.New("failed to register callback table to extension") } log.Trace("Libfuse::InitFuse : Callbacks registered to extension") @@ -375,11 +381,12 @@ func libfuse2_getattr(path *C.char, stbuf *C.stat_t) C.int { attr, err := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: name}) if err != nil { //log.Err("Libfuse::libfuse2_getattr : Failed to get attributes of %s [%s]", name, err.Error()) - if err == syscall.ENOENT { + switch err { + case syscall.ENOENT: return -C.ENOENT - } else if err == syscall.EACCES { + case syscall.EACCES: return -C.EACCES - } else { + default: return -C.EIO } } @@ -431,7 +438,8 @@ func libfuse_mkdir(path *C.char, mode C.mode_t) C.int { name = common.NormalizeObjectName(name) log.Trace("Libfuse::libfuse2_mkdir : %s", name) - err := fuseFS.NextComponent().CreateDir(internal.CreateDirOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) + err := fuseFS.NextComponent(). 
+ CreateDir(internal.CreateDirOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) if err != nil { log.Err("Libfuse::libfuse2_mkdir : Failed to create %s [%s]", name, err.Error()) if os.IsPermission(err) { @@ -443,7 +451,11 @@ func libfuse_mkdir(path *C.char, mode C.mode_t) C.int { } } - libfuseStatsCollector.PushEvents(createDir, name, map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}) + libfuseStatsCollector.PushEvents( + createDir, + name, + map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}, + ) libfuseStatsCollector.UpdateStats(stats_manager.Increment, createDir, (int64)(1)) return 0 @@ -493,7 +505,13 @@ func libfuse_releasedir(path *C.char, fi *C.fuse_file_info_t) C.int { // libfuse2_readdir reads a directory // //export libfuse2_readdir -func libfuse2_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, off C.off_t, fi *C.fuse_file_info_t) C.int { +func libfuse2_readdir( + _ *C.char, + buf unsafe.Pointer, + filler C.fuse_fill_dir_t, + off C.off_t, + fi *C.fuse_file_info_t, +) C.int { handle := (*handlemap.Handle)(unsafe.Pointer(uintptr(fi.fh))) handle.RLock() @@ -516,7 +534,13 @@ func libfuse2_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, o }) if err != nil { - log.Err("Libfuse::libfuse2_readdir : Path %s, handle: %d, offset %d. Error in retrieval %s", handle.Path, handle.ID, off_64, err.Error()) + log.Err( + "Libfuse::libfuse2_readdir : Path %s, handle: %d, offset %d. Error in retrieval %s", + handle.Path, + handle.ID, + off_64, + err.Error(), + ) if os.IsNotExist(err) { return C.int(C_ENOENT) } else if os.IsPermission(err) { @@ -527,7 +551,12 @@ func libfuse2_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, o } if off_64 == 0 { - attrs = append([]*internal.ObjAttr{{Flags: fuseFS.lsFlags, Name: "."}, {Flags: fuseFS.lsFlags, Name: ".."}}, attrs...) + attrs = append( + []*internal.ObjAttr{ + {Flags: fuseFS.lsFlags, Name: "."}, + {Flags: fuseFS.lsFlags, Name: ".."}, + }, + attrs...) } cacheInfo.sIndex = off_64 @@ -551,7 +580,7 @@ func libfuse2_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, o fuseFS.fillStat(cacheInfo.children[segmentIdx], &stbuf) name := C.CString(cacheInfo.children[segmentIdx].Name) - if 0 != C.fill_dir_entry(filler, buf, name, &stbuf, idx+1) { + if ret := C.fill_dir_entry(filler, buf, name, &stbuf, idx+1); ret != 0 { C.free(unsafe.Pointer(name)) break } @@ -602,7 +631,8 @@ func libfuse_create(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { name = common.NormalizeObjectName(name) log.Trace("Libfuse::libfuse2_create : %s", name) - handle, err := fuseFS.NextComponent().CreateFile(internal.CreateFileOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) + handle, err := fuseFS.NextComponent(). 
+ CreateFile(internal.CreateFileOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) if err != nil { log.Err("Libfuse::libfuse2_create : Failed to create %s [%s]", name, err.Error()) if os.IsExist(err) { @@ -615,14 +645,22 @@ func libfuse_create(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { } handlemap.Add(handle) - ret_val := C.allocate_native_file_object(C.ulong(handle.UnixFD), C.ulong(uintptr(unsafe.Pointer(handle))), 0) + ret_val := C.allocate_native_file_object( + C.ulong(handle.UnixFD), + C.ulong(uintptr(unsafe.Pointer(handle))), + 0, + ) if !handle.Cached() { ret_val.fd = 0 } log.Trace("Libfuse::libfuse2_create : %s, handle %d", name, handle.ID) fi.fh = C.ulong(uintptr(unsafe.Pointer(ret_val))) - libfuseStatsCollector.PushEvents(createFile, name, map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}) + libfuseStatsCollector.PushEvents( + createFile, + name, + map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}, + ) // increment open file handles count libfuseStatsCollector.UpdateStats(stats_manager.Increment, openHandles, (int64)(1)) @@ -668,7 +706,11 @@ func libfuse_open(path *C.char, fi *C.fuse_file_info_t) C.int { } handlemap.Add(handle) - ret_val := C.allocate_native_file_object(C.ulong(handle.UnixFD), C.ulong(uintptr(unsafe.Pointer(handle))), C.ulong(handle.Size)) + ret_val := C.allocate_native_file_object( + C.ulong(handle.UnixFD), + C.ulong(uintptr(unsafe.Pointer(handle))), + C.ulong(handle.Size), + ) if !handle.Cached() { ret_val.fd = 0 } @@ -684,7 +726,13 @@ func libfuse_open(path *C.char, fi *C.fuse_file_info_t) C.int { // libfuse_read reads data from an open file // //export libfuse_read -func libfuse_read(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.fuse_file_info_t) C.int { +func libfuse_read( + path *C.char, + buf *C.char, + size C.size_t, + off C.off_t, + fi *C.fuse_file_info_t, +) C.int { fileHandle := (*C.file_handle_t)(unsafe.Pointer(uintptr(fi.fh))) handle := (*handlemap.Handle)(unsafe.Pointer(uintptr(fileHandle.obj))) @@ -710,7 +758,12 @@ func libfuse_read(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.f err = nil } if err != nil { - log.Err("Libfuse::libfuse2_read : error reading file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) + log.Err( + "Libfuse::libfuse2_read : error reading file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) return -C.EIO } @@ -720,7 +773,13 @@ func libfuse_read(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.f // libfuse_write writes data to an open file // //export libfuse_write -func libfuse_write(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.fuse_file_info_t) C.int { +func libfuse_write( + path *C.char, + buf *C.char, + size C.size_t, + off C.off_t, + fi *C.fuse_file_info_t, +) C.int { fileHandle := (*C.file_handle_t)(unsafe.Pointer(uintptr(fi.fh))) handle := (*handlemap.Handle)(unsafe.Pointer(uintptr(fileHandle.obj))) @@ -735,7 +794,12 @@ func libfuse_write(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C. 
}) if err != nil { - log.Err("Libfuse::libfuse2_write : error writing file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) + log.Err( + "Libfuse::libfuse2_write : error writing file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) return -C.EIO } @@ -762,12 +826,18 @@ func libfuse_flush(path *C.char, fi *C.fuse_file_info_t) C.int { err := fuseFS.NextComponent().FlushFile(internal.FlushFileOptions{Handle: handle}) if err != nil { - log.Err("Libfuse::libfuse2_flush : error flushing file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) - if err == syscall.ENOENT { + log.Err( + "Libfuse::libfuse2_flush : error flushing file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) + switch err { + case syscall.ENOENT: return -C.ENOENT - } else if err == syscall.EACCES { + case syscall.EACCES: return -C.EACCES - } else { + default: return -C.EIO } } @@ -792,7 +862,8 @@ func libfuse2_truncate(path *C.char, off C.off_t) C.int { log.Trace("Libfuse::libfuse2_truncate : %s size %d", name, off) - err := fuseFS.NextComponent().TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(off)}) + err := fuseFS.NextComponent(). + TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(off)}) if err != nil { log.Err("Libfuse::libfuse2_truncate : error truncating file %s [%s]", name, err.Error()) if os.IsNotExist(err) { @@ -822,12 +893,18 @@ func libfuse_release(path *C.char, fi *C.fuse_file_info_t) C.int { err := fuseFS.NextComponent().CloseFile(internal.CloseFileOptions{Handle: handle}) if err != nil { - log.Err("Libfuse::libfuse2_release : error closing file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) - if err == syscall.ENOENT { + log.Err( + "Libfuse::libfuse2_release : error closing file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) + switch err { + case syscall.ENOENT: return -C.ENOENT - } else if err == syscall.EACCES { + case syscall.EACCES: return -C.EACCES - } else { + default: return -C.EIO } } @@ -883,26 +960,42 @@ func libfuse2_rename(src *C.char, dst *C.char) C.int { // ENOENT. 
Not covered: a directory component in dst does not exist if srcPath == "" || dstPath == "" { - log.Err("Libfuse::libfuse2_rename : src: [%s] or dst: [%s] is an empty string", srcPath, dstPath) + log.Err( + "Libfuse::libfuse2_rename : src: [%s] or dst: [%s] is an empty string", + srcPath, + dstPath, + ) return -C.ENOENT } srcAttr, srcErr := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: srcPath}) if os.IsNotExist(srcErr) { - log.Err("Libfuse::libfuse2_rename : Failed to get attributes of %s [%s]", srcPath, srcErr.Error()) + log.Err( + "Libfuse::libfuse2_rename : Failed to get attributes of %s [%s]", + srcPath, + srcErr.Error(), + ) return -C.ENOENT } dstAttr, dstErr := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: dstPath}) // EISDIR if (dstErr == nil || os.IsExist(dstErr)) && dstAttr.IsDir() && !srcAttr.IsDir() { - log.Err("Libfuse::libfuse2_rename : dst [%s] is an existing directory but src [%s] is not a directory", dstPath, srcPath) + log.Err( + "Libfuse::libfuse2_rename : dst [%s] is an existing directory but src [%s] is not a directory", + dstPath, + srcPath, + ) return -C.EISDIR } // ENOTDIR if (dstErr == nil || os.IsExist(dstErr)) && !dstAttr.IsDir() && srcAttr.IsDir() { - log.Err("Libfuse::libfuse2_rename : dst [%s] is an existing file but src [%s] is a directory", dstPath, srcPath) + log.Err( + "Libfuse::libfuse2_rename : dst [%s] is an existing file but src [%s] is a directory", + dstPath, + srcPath, + ) return -C.ENOTDIR } @@ -915,13 +1008,23 @@ func libfuse2_rename(src *C.char, dst *C.char) C.int { } } - err := fuseFS.NextComponent().RenameDir(internal.RenameDirOptions{Src: srcPath, Dst: dstPath}) + err := fuseFS.NextComponent(). + RenameDir(internal.RenameDirOptions{Src: srcPath, Dst: dstPath}) if err != nil { - log.Err("Libfuse::libfuse2_rename : error renaming directory %s -> %s [%s]", srcPath, dstPath, err.Error()) + log.Err( + "Libfuse::libfuse2_rename : error renaming directory %s -> %s [%s]", + srcPath, + dstPath, + err.Error(), + ) return -C.EIO } - libfuseStatsCollector.PushEvents(renameDir, srcPath, map[string]interface{}{source: srcPath, dest: dstPath}) + libfuseStatsCollector.PushEvents( + renameDir, + srcPath, + map[string]interface{}{source: srcPath, dest: dstPath}, + ) libfuseStatsCollector.UpdateStats(stats_manager.Increment, renameDir, (int64)(1)) } else { @@ -956,9 +1059,15 @@ func libfuse_symlink(target *C.char, link *C.char) C.int { targetPath = common.NormalizeObjectName(targetPath) log.Trace("Libfuse::libfuse2_symlink : Received for %s -> %s", name, targetPath) - err := fuseFS.NextComponent().CreateLink(internal.CreateLinkOptions{Name: name, Target: targetPath}) + err := fuseFS.NextComponent(). + CreateLink(internal.CreateLinkOptions{Name: name, Target: targetPath}) if err != nil { - log.Err("Libfuse::libfuse2_symlink : error linking file %s -> %s [%s]", name, targetPath, err.Error()) + log.Err( + "Libfuse::libfuse2_symlink : error linking file %s -> %s [%s]", + name, + targetPath, + err.Error(), + ) return -C.EIO } @@ -982,7 +1091,8 @@ func libfuse_readlink(path *C.char, buf *C.char, size C.size_t) C.int { linkSize = attr.Size } - targetPath, err := fuseFS.NextComponent().ReadLink(internal.ReadLinkOptions{Name: name, Size: linkSize}) + targetPath, err := fuseFS.NextComponent(). 
+ ReadLink(internal.ReadLinkOptions{Name: name, Size: linkSize}) if err != nil { log.Err("Libfuse::libfuse2_readlink : error reading link file %s [%s]", name, err.Error()) if os.IsNotExist(err) { @@ -1075,7 +1185,11 @@ func libfuse2_chmod(path *C.char, mode C.mode_t) C.int { return -C.EIO } - libfuseStatsCollector.PushEvents(chmod, name, map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}) + libfuseStatsCollector.PushEvents( + chmod, + name, + map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}, + ) libfuseStatsCollector.UpdateStats(stats_manager.Increment, chmod, (int64)(1)) return 0 diff --git a/component/libfuse/libfuse2_handler_test_wrapper.go b/component/libfuse/libfuse2_handler_test_wrapper.go index 6f0f8d5334..4d30ef03f7 100644 --- a/component/libfuse/libfuse2_handler_test_wrapper.go +++ b/component/libfuse/libfuse2_handler_test_wrapper.go @@ -77,10 +77,10 @@ var defaultSize = int64(0) var defaultMode = 0777 func newTestLibfuse(next internal.Component, configuration string) *Libfuse { - config.ReadConfigFromReader(strings.NewReader(configuration)) + _ = config.ReadConfigFromReader(strings.NewReader(configuration)) libfuse := NewLibfuseComponent() libfuse.SetNextComponent(next) - libfuse.Configure(true) + _ = libfuse.Configure(true) return libfuse.(*Libfuse) } @@ -130,10 +130,10 @@ func testStatFs(suite *libfuseTestSuite) { buf := &C.statvfs_t{} libfuse_statfs(path, buf) - suite.assert.Equal(int(buf.f_frsize), 1) - suite.assert.Equal(int(buf.f_blocks), 2) - suite.assert.Equal(int(buf.f_bavail), 3) - suite.assert.Equal(int(buf.f_bfree), 4) + suite.assert.Equal(1, int(buf.f_frsize)) + suite.assert.Equal(2, int(buf.f_blocks)) + suite.assert.Equal(3, int(buf.f_bavail)) + suite.assert.Equal(4, int(buf.f_bfree)) } func testMkDirError(suite *libfuseTestSuite) { @@ -143,7 +143,9 @@ func testMkDirError(suite *libfuseTestSuite) { defer C.free(unsafe.Pointer(path)) mode := fs.FileMode(0775) options := internal.CreateDirOptions{Name: name, Mode: mode} - suite.mock.EXPECT().CreateDir(options).Return(errors.New("failed to create directory")) + suite.mock.EXPECT(). + CreateDir(options). + Return(errors.New("failed to create directory")) err := libfuse_mkdir(path, 0775) suite.assert.Equal(C.int(-C.EIO), err) @@ -221,7 +223,9 @@ func testCreateError(suite *libfuseTestSuite) { mode := fs.FileMode(0775) info := &C.fuse_file_info_t{} options := internal.CreateFileOptions{Name: name, Mode: mode} - suite.mock.EXPECT().CreateFile(options).Return(&handlemap.Handle{}, errors.New("failed to create file")) + suite.mock.EXPECT(). + CreateFile(options). 
+ Return(&handlemap.Handle{}, errors.New("failed to create file")) err := libfuse_create(path, 0775, info) suite.assert.Equal(C.int(-C.EIO), err) @@ -292,7 +296,9 @@ func testOpenAppendFlagDisableWritebackCache(suite *libfuseTestSuite) { defer suite.cleanupTest() suite.cleanupTest() // clean up the default libfuse generated config := "libfuse:\n disable-writeback-cache: true\n" - suite.setupTestHelper(config) // setup a new libfuse with a custom config (clean up will occur after the test as usual) + suite.setupTestHelper( + config, + ) // setup a new libfuse with a custom config (clean up will occur after the test as usual) suite.assert.True(suite.libfuse.disableWritebackCache) name := "path" @@ -322,7 +328,9 @@ func testOpenAppendFlagIgnoreAppendFlag(suite *libfuseTestSuite) { defer suite.cleanupTest() suite.cleanupTest() // clean up the default libfuse generated config := "libfuse:\n ignore-open-flags: true\n" - suite.setupTestHelper(config) // setup a new libfuse with a custom config (clean up will occur after the test as usual) + suite.setupTestHelper( + config, + ) // setup a new libfuse with a custom config (clean up will occur after the test as usual) suite.assert.True(suite.libfuse.ignoreOpenFlags) name := "path" @@ -383,7 +391,9 @@ func testOpenError(suite *libfuseTestSuite) { info := &C.fuse_file_info_t{} info.flags = C.O_RDWR options := internal.OpenFileOptions{Name: name, Flags: flags, Mode: mode} - suite.mock.EXPECT().OpenFile(options).Return(&handlemap.Handle{}, errors.New("failed to open a file")) + suite.mock.EXPECT(). + OpenFile(options). + Return(&handlemap.Handle{}, errors.New("failed to open a file")) err := libfuse_open(path, info) suite.assert.Equal(C.int(-C.EIO), err) diff --git a/component/libfuse/libfuse_handler.go b/component/libfuse/libfuse_handler.go index b8d2d08320..87719defdc 100644 --- a/component/libfuse/libfuse_handler.go +++ b/component/libfuse/libfuse_handler.go @@ -141,7 +141,10 @@ func (lf *Libfuse) initFuse() error { errc = C.get_extension_callbacks(&operations) if errc != 0 { C.unload_library() - log.Err("Libfuse::InitFuse : Failed to get callback table from extension. error code %d", errc) + log.Err( + "Libfuse::InitFuse : Failed to get callback table from extension. error code %d", + errc, + ) return errors.New("failed to get callback table from extension") } log.Trace("Libfuse::InitFuse : Extension callback retrieved") @@ -154,7 +157,10 @@ func (lf *Libfuse) initFuse() error { errc = C.register_callback_to_extension(&my_operations) if errc != 0 { C.unload_library() - log.Err("Libfuse::InitFuse : Failed to register callback table to extension. error code %d", errc) + log.Err( + "Libfuse::InitFuse : Failed to register callback table to extension. 
error code %d", + errc, + ) return errors.New("failed to register callback table to extension") } log.Trace("Libfuse::InitFuse : Callbacks registered to extension") @@ -256,7 +262,12 @@ func (lf *Libfuse) destroyFuse() error { //export libfuse_init func libfuse_init(conn *C.fuse_conn_info_t, cfg *C.fuse_config_t) (res unsafe.Pointer) { - log.Trace("Libfuse::libfuse_init : init (read : %v, write %v, read-ahead %v)", conn.max_read, conn.max_write, conn.max_readahead) + log.Trace( + "Libfuse::libfuse_init : init (read : %v, write %v, read-ahead %v)", + conn.max_read, + conn.max_write, + conn.max_readahead, + ) log.Info("Libfuse::NotifyMountToParent : Notifying parent for successful mount") if err := common.NotifyMountToParent(); err != nil { @@ -307,7 +318,8 @@ func libfuse_init(conn *C.fuse_conn_info_t, cfg *C.fuse_config_t) (res unsafe.Po even for files opened for O_WRONLY it is possible that READ requests will be generated by the kernel. */ - if (!fuseFS.directIO) && (!fuseFS.disableWritebackCache) && ((conn.capable & C.FUSE_CAP_WRITEBACK_CACHE) != 0) { + if (!fuseFS.directIO) && (!fuseFS.disableWritebackCache) && + ((conn.capable & C.FUSE_CAP_WRITEBACK_CACHE) != 0) { // Buffer write requests at libfuse and then hand it off to application log.Info("Libfuse::libfuse_init : Enable Capability : FUSE_CAP_WRITEBACK_CACHE") conn.want |= C.FUSE_CAP_WRITEBACK_CACHE @@ -412,11 +424,12 @@ func libfuse_getattr(path *C.char, stbuf *C.stat_t, fi *C.fuse_file_info_t) C.in attr, err := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: name}) if err != nil { // log.Err("Libfuse::libfuse_getattr : Failed to get attributes of %s [%s]", name, err.Error()) - if err == syscall.ENOENT { + switch err { + case syscall.ENOENT: return -C.ENOENT - } else if err == syscall.EACCES { + case syscall.EACCES: return -C.EACCES - } else { + default: return -C.EIO } } @@ -436,7 +449,8 @@ func libfuse_mkdir(path *C.char, mode C.mode_t) C.int { name = common.NormalizeObjectName(name) log.Trace("Libfuse::libfuse_mkdir : %s", name) - err := fuseFS.NextComponent().CreateDir(internal.CreateDirOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) + err := fuseFS.NextComponent(). 
+ CreateDir(internal.CreateDirOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) if err != nil { log.Err("Libfuse::libfuse_mkdir : Failed to create %s [%s]", name, err.Error()) if os.IsPermission(err) { @@ -448,7 +462,11 @@ func libfuse_mkdir(path *C.char, mode C.mode_t) C.int { } } - libfuseStatsCollector.PushEvents(createDir, name, map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}) + libfuseStatsCollector.PushEvents( + createDir, + name, + map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}, + ) libfuseStatsCollector.UpdateStats(stats_manager.Increment, createDir, (int64)(1)) return 0 @@ -500,7 +518,14 @@ func libfuse_releasedir(path *C.char, fi *C.fuse_file_info_t) C.int { // libfuse_readdir reads a directory // //export libfuse_readdir -func libfuse_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, off C.off_t, fi *C.fuse_file_info_t, flag C.fuse_readdir_flags_t) C.int { +func libfuse_readdir( + _ *C.char, + buf unsafe.Pointer, + filler C.fuse_fill_dir_t, + off C.off_t, + fi *C.fuse_file_info_t, + flag C.fuse_readdir_flags_t, +) C.int { handle := (*handlemap.Handle)(unsafe.Pointer(uintptr(fi.fh))) handle.RLock() @@ -523,7 +548,13 @@ func libfuse_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, of }) if err != nil { - log.Err("Libfuse::libfuse_readdir : Path %s, handle: %d, offset %d. Error in retrieval %s", handle.Path, handle.ID, off_64, err.Error()) + log.Err( + "Libfuse::libfuse_readdir : Path %s, handle: %d, offset %d. Error in retrieval %s", + handle.Path, + handle.ID, + off_64, + err.Error(), + ) if os.IsNotExist(err) { return C.int(C_ENOENT) } else if os.IsPermission(err) { @@ -559,7 +590,7 @@ func libfuse_readdir(_ *C.char, buf unsafe.Pointer, filler C.fuse_fill_dir_t, of fuseFS.fillStat(cacheInfo.children[segmentIdx], &stbuf) name := C.CString(cacheInfo.children[segmentIdx].Name) - if 0 != C.fill_dir_entry(filler, buf, name, &stbuf, idx+1) { + if ret := C.fill_dir_entry(filler, buf, name, &stbuf, idx+1); ret != 0 { C.free(unsafe.Pointer(name)) break } @@ -640,7 +671,8 @@ func libfuse_create(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { name = common.NormalizeObjectName(name) log.Trace("Libfuse::libfuse_create : %s", name) - handle, err := fuseFS.NextComponent().CreateFile(internal.CreateFileOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) + handle, err := fuseFS.NextComponent(). 
+ CreateFile(internal.CreateFileOptions{Name: name, Mode: fs.FileMode(uint32(mode) & 0xffffffff)}) if err != nil { log.Err("Libfuse::libfuse_create : Failed to create %s [%s]", name, err.Error()) if os.IsExist(err) { @@ -661,7 +693,11 @@ func libfuse_create(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { log.Trace("Libfuse::libfuse_create : %s, handle %d", name, handle.ID) fi.fh = C.ulong(uintptr(unsafe.Pointer(ret_val))) - libfuseStatsCollector.PushEvents(createFile, name, map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}) + libfuseStatsCollector.PushEvents( + createFile, + name, + map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}, + ) // increment open file handles count libfuseStatsCollector.UpdateStats(stats_manager.Increment, openHandles, (int64)(1)) @@ -690,7 +726,11 @@ func libfuse_open(path *C.char, fi *C.fuse_file_info_t) C.int { if !fuseFS.disableWritebackCache { if fi.flags&C.O_ACCMODE == C.O_WRONLY || fi.flags&C.O_APPEND != 0 { if fuseFS.ignoreOpenFlags { - log.Warn("Libfuse::libfuse_open : Flags (%X) not supported to open %s when write back cache is on. Ignoring unsupported flags.", fi.flags, name) + log.Warn( + "Libfuse::libfuse_open : Flags (%X) not supported to open %s when write back cache is on. Ignoring unsupported flags.", + fi.flags, + name, + ) // O_ACCMODE disables both RDONLY, WRONLY and RDWR flags fi.flags = fi.flags &^ (C.O_APPEND | C.O_ACCMODE) fi.flags = fi.flags | C.O_RDWR @@ -721,7 +761,11 @@ func libfuse_open(path *C.char, fi *C.fuse_file_info_t) C.int { handlemap.Add(handle) //fi.fh = C.ulong(uintptr(unsafe.Pointer(handle))) - ret_val := C.allocate_native_file_object(C.ulong(handle.UnixFD), C.ulong(uintptr(unsafe.Pointer(handle))), C.ulong(handle.Size)) + ret_val := C.allocate_native_file_object( + C.ulong(handle.UnixFD), + C.ulong(uintptr(unsafe.Pointer(handle))), + C.ulong(handle.Size), + ) if !handle.Cached() { ret_val.fd = 0 } @@ -737,7 +781,13 @@ func libfuse_open(path *C.char, fi *C.fuse_file_info_t) C.int { // libfuse_read reads data from an open file // //export libfuse_read -func libfuse_read(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.fuse_file_info_t) C.int { +func libfuse_read( + path *C.char, + buf *C.char, + size C.size_t, + off C.off_t, + fi *C.fuse_file_info_t, +) C.int { fileHandle := (*C.file_handle_t)(unsafe.Pointer(uintptr(fi.fh))) handle := (*handlemap.Handle)(unsafe.Pointer(uintptr(fileHandle.obj))) @@ -763,7 +813,12 @@ func libfuse_read(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.f err = nil } if err != nil { - log.Err("Libfuse::libfuse_read : error reading file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) + log.Err( + "Libfuse::libfuse_read : error reading file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) return -C.EIO } @@ -773,7 +828,13 @@ func libfuse_read(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.f // libfuse_write writes data to an open file // //export libfuse_write -func libfuse_write(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C.fuse_file_info_t) C.int { +func libfuse_write( + path *C.char, + buf *C.char, + size C.size_t, + off C.off_t, + fi *C.fuse_file_info_t, +) C.int { fileHandle := (*C.file_handle_t)(unsafe.Pointer(uintptr(fi.fh))) handle := (*handlemap.Handle)(unsafe.Pointer(uintptr(fileHandle.obj))) @@ -789,7 +850,12 @@ func libfuse_write(path *C.char, buf *C.char, size C.size_t, off C.off_t, fi *C. 
}) if err != nil { - log.Err("Libfuse::libfuse_write : error writing file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) + log.Err( + "Libfuse::libfuse_write : error writing file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) return -C.EIO } @@ -815,12 +881,18 @@ func libfuse_flush(path *C.char, fi *C.fuse_file_info_t) C.int { err := fuseFS.NextComponent().FlushFile(internal.FlushFileOptions{Handle: handle}) if err != nil { - log.Err("Libfuse::libfuse_flush : error flushing file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) - if err == syscall.ENOENT { + log.Err( + "Libfuse::libfuse_flush : error flushing file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) + switch err { + case syscall.ENOENT: return -C.ENOENT - } else if err == syscall.EACCES { + case syscall.EACCES: return -C.EACCES - } else { + default: return -C.EIO } } @@ -888,12 +960,18 @@ func libfuse_release(path *C.char, fi *C.fuse_file_info_t) C.int { err := fuseFS.NextComponent().CloseFile(internal.CloseFileOptions{Handle: handle}) if err != nil { - log.Err("Libfuse::libfuse_release : error closing file %s, handle: %d [%s]", handle.Path, handle.ID, err.Error()) - if err == syscall.ENOENT { + log.Err( + "Libfuse::libfuse_release : error closing file %s, handle: %d [%s]", + handle.Path, + handle.ID, + err.Error(), + ) + switch err { + case syscall.ENOENT: return -C.ENOENT - } else if err == syscall.EACCES { + case syscall.EACCES: return -C.EACCES - } else { + default: return -C.EIO } } @@ -954,13 +1032,21 @@ func libfuse_rename(src *C.char, dst *C.char, flags C.uint) C.int { // ENOENT. Not covered: a directory component in dst does not exist if srcPath == "" || dstPath == "" { - log.Err("Libfuse::libfuse_rename : src: [%s] or dst: [%s] is an empty string", srcPath, dstPath) + log.Err( + "Libfuse::libfuse_rename : src: [%s] or dst: [%s] is an empty string", + srcPath, + dstPath, + ) return -C.ENOENT } srcAttr, srcErr := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: srcPath}) if os.IsNotExist(srcErr) { - log.Err("Libfuse::libfuse_rename : Failed to get attributes of %s [%s]", srcPath, srcErr.Error()) + log.Err( + "Libfuse::libfuse_rename : Failed to get attributes of %s [%s]", + srcPath, + srcErr.Error(), + ) return -C.ENOENT } dstAttr, dstErr := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: dstPath}) @@ -972,13 +1058,21 @@ func libfuse_rename(src *C.char, dst *C.char, flags C.uint) C.int { // EISDIR if (dstErr == nil || os.IsExist(dstErr)) && dstAttr.IsDir() && !srcAttr.IsDir() { - log.Err("Libfuse::libfuse_rename : dst [%s] is an existing directory but src [%s] is not a directory", dstPath, srcPath) + log.Err( + "Libfuse::libfuse_rename : dst [%s] is an existing directory but src [%s] is not a directory", + dstPath, + srcPath, + ) return -C.EISDIR } // ENOTDIR if (dstErr == nil || os.IsExist(dstErr)) && !dstAttr.IsDir() && srcAttr.IsDir() { - log.Err("Libfuse::libfuse_rename : dst [%s] is an existing file but src [%s] is a directory", dstPath, srcPath) + log.Err( + "Libfuse::libfuse_rename : dst [%s] is an existing file but src [%s] is a directory", + dstPath, + srcPath, + ) return -C.ENOTDIR } @@ -996,11 +1090,20 @@ func libfuse_rename(src *C.char, dst *C.char, flags C.uint) C.int { Dst: dstPath, }) if err != nil { - log.Err("Libfuse::libfuse_rename : error renaming directory %s -> %s [%s]", srcPath, dstPath, err.Error()) + log.Err( + "Libfuse::libfuse_rename : error renaming directory %s -> %s [%s]", + srcPath, + dstPath, + 
err.Error(), + ) return -C.EIO } - libfuseStatsCollector.PushEvents(renameDir, srcPath, map[string]interface{}{source: srcPath, dest: dstPath}) + libfuseStatsCollector.PushEvents( + renameDir, + srcPath, + map[string]interface{}{source: srcPath, dest: dstPath}, + ) libfuseStatsCollector.UpdateStats(stats_manager.Increment, renameDir, (int64)(1)) } else { @@ -1035,9 +1138,15 @@ func libfuse_symlink(target *C.char, link *C.char) C.int { targetPath = common.NormalizeObjectName(targetPath) log.Trace("Libfuse::libfuse_symlink : Received for %s -> %s", name, targetPath) - err := fuseFS.NextComponent().CreateLink(internal.CreateLinkOptions{Name: name, Target: targetPath}) + err := fuseFS.NextComponent(). + CreateLink(internal.CreateLinkOptions{Name: name, Target: targetPath}) if err != nil { - log.Err("Libfuse::libfuse_symlink : error linking file %s -> %s [%s]", name, targetPath, err.Error()) + log.Err( + "Libfuse::libfuse_symlink : error linking file %s -> %s [%s]", + name, + targetPath, + err.Error(), + ) return -C.EIO } @@ -1061,7 +1170,8 @@ func libfuse_readlink(path *C.char, buf *C.char, size C.size_t) C.int { linkSize = attr.Size } - targetPath, err := fuseFS.NextComponent().ReadLink(internal.ReadLinkOptions{Name: name, Size: linkSize}) + targetPath, err := fuseFS.NextComponent(). + ReadLink(internal.ReadLinkOptions{Name: name, Size: linkSize}) if err != nil { log.Err("Libfuse::libfuse_readlink : error reading link file %s [%s]", name, err.Error()) if os.IsNotExist(err) { @@ -1154,7 +1264,11 @@ func libfuse_chmod(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { return -C.EIO } - libfuseStatsCollector.PushEvents(chmod, name, map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}) + libfuseStatsCollector.PushEvents( + chmod, + name, + map[string]interface{}{md: fs.FileMode(uint32(mode) & 0xffffffff)}, + ) libfuseStatsCollector.UpdateStats(stats_manager.Increment, chmod, (int64)(1)) return 0 diff --git a/component/libfuse/libfuse_handler_test_wrapper.go b/component/libfuse/libfuse_handler_test_wrapper.go index afd2fac99d..ca523c74a1 100644 --- a/component/libfuse/libfuse_handler_test_wrapper.go +++ b/component/libfuse/libfuse_handler_test_wrapper.go @@ -76,10 +76,10 @@ var defaultSize = int64(0) var defaultMode = 0777 func newTestLibfuse(next internal.Component, configuration string) *Libfuse { - config.ReadConfigFromReader(strings.NewReader(configuration)) + _ = config.ReadConfigFromReader(strings.NewReader(configuration)) libfuse := NewLibfuseComponent() libfuse.SetNextComponent(next) - libfuse.Configure(true) + _ = libfuse.Configure(true) return libfuse.(*Libfuse) } @@ -204,7 +204,9 @@ func testCreateError(suite *libfuseTestSuite) { mode := fs.FileMode(0775) info := &C.fuse_file_info_t{} options := internal.CreateFileOptions{Name: name, Mode: mode} - suite.mock.EXPECT().CreateFile(options).Return(&handlemap.Handle{}, errors.New("failed to create file")) + suite.mock.EXPECT(). + CreateFile(options). 
+ Return(&handlemap.Handle{}, errors.New("failed to create file")) err := libfuse_create(path, 0775, info) suite.assert.Equal(C.int(-C.EIO), err) @@ -268,7 +270,9 @@ func testOpenAppendFlagDisableWritebackCache(suite *libfuseTestSuite) { defer suite.cleanupTest() suite.cleanupTest() // clean up the default libfuse generated config := "libfuse:\n disable-writeback-cache: true\n" - suite.setupTestHelper(config) // setup a new libfuse with a custom config (clean up will occur after the test as usual) + suite.setupTestHelper( + config, + ) // setup a new libfuse with a custom config (clean up will occur after the test as usual) suite.assert.True(suite.libfuse.disableWritebackCache) name := "path" @@ -298,7 +302,9 @@ func testOpenAppendFlagIgnoreAppendFlag(suite *libfuseTestSuite) { defer suite.cleanupTest() suite.cleanupTest() // clean up the default libfuse generated config := "libfuse:\n ignore-open-flags: true\n" - suite.setupTestHelper(config) // setup a new libfuse with a custom config (clean up will occur after the test as usual) + suite.setupTestHelper( + config, + ) // setup a new libfuse with a custom config (clean up will occur after the test as usual) suite.assert.True(suite.libfuse.ignoreOpenFlags) name := "path" @@ -361,7 +367,9 @@ func testOpenError(suite *libfuseTestSuite) { info := &C.fuse_file_info_t{} info.flags = C.O_RDWR options := internal.OpenFileOptions{Name: name, Flags: flags, Mode: mode} - suite.mock.EXPECT().OpenFile(options).Return(&handlemap.Handle{}, errors.New("failed to open a file")) + suite.mock.EXPECT(). + OpenFile(options). + Return(&handlemap.Handle{}, errors.New("failed to open a file")) err := libfuse_open(path, info) suite.assert.Equal(C.int(-C.EIO), err) @@ -401,7 +409,11 @@ func testFTruncate(suite *libfuseTestSuite) { size := int64(1024) handle := handlemap.NewHandle(name) - ret_val := C.allocate_native_file_object(C.ulong(handle.UnixFD), C.ulong(uintptr(unsafe.Pointer(handle))), C.ulong(handle.Size)) + ret_val := C.allocate_native_file_object( + C.ulong(handle.UnixFD), + C.ulong(uintptr(unsafe.Pointer(handle))), + C.ulong(handle.Size), + ) fi := C.fuse_file_info_t{} fi.fh = C.ulong(uintptr(unsafe.Pointer(ret_val))) @@ -420,7 +432,11 @@ func testFTruncateError(suite *libfuseTestSuite) { size := int64(1024) handle := handlemap.NewHandle(name) - ret_val := C.allocate_native_file_object(C.ulong(handle.UnixFD), C.ulong(uintptr(unsafe.Pointer(handle))), C.ulong(handle.Size)) + ret_val := C.allocate_native_file_object( + C.ulong(handle.UnixFD), + C.ulong(uintptr(unsafe.Pointer(handle))), + C.ulong(handle.Size), + ) fi := C.fuse_file_info_t{} fi.fh = C.ulong(uintptr(unsafe.Pointer(ret_val))) @@ -675,10 +691,10 @@ func testStatFs(suite *libfuseTestSuite) { buf := &C.statvfs_t{} libfuse_statfs(path, buf) - suite.assert.Equal(int(buf.f_frsize), 1) - suite.assert.Equal(int(buf.f_blocks), 2) - suite.assert.Equal(int(buf.f_bavail), 3) - suite.assert.Equal(int(buf.f_bfree), 4) + suite.assert.Equal(1, int(buf.f_frsize)) + suite.assert.Equal(2, int(buf.f_blocks)) + suite.assert.Equal(3, int(buf.f_bavail)) + suite.assert.Equal(4, int(buf.f_bfree)) } func testChmodError(suite *libfuseTestSuite) { diff --git a/component/loopback/loopback_fs.go b/component/loopback/loopback_fs.go index 593cb12439..a3711f5e21 100644 --- a/component/loopback/loopback_fs.go +++ b/component/loopback/loopback_fs.go @@ -166,7 +166,9 @@ func (lfs *LoopbackFS) ReadDir(options internal.ReadDirOptions) ([]*internal.Obj } // TODO: we can make it more intricate by generating a token and splitting 
diff --git a/component/loopback/loopback_fs.go b/component/loopback/loopback_fs.go index 593cb12439..a3711f5e21 100644 --- a/component/loopback/loopback_fs.go +++ b/component/loopback/loopback_fs.go @@ -166,7 +166,9 @@ func (lfs *LoopbackFS) ReadDir(options internal.ReadDirOptions) ([]*internal.Obj } // TODO: we can make it more intricate by generating a token and splitting streamed dir mimicking storage -func (lfs *LoopbackFS) StreamDir(options internal.StreamDirOptions) ([]*internal.ObjAttr, string, error) { +func (lfs *LoopbackFS) StreamDir( + options internal.StreamDirOptions, +) ([]*internal.ObjAttr, string, error) { if options.Token == "na" { return nil, "", nil } @@ -497,7 +499,11 @@ func (lfs *LoopbackFS) Chown(options internal.ChownOptions) error { func (lfs *LoopbackFS) StageData(options internal.StageDataOptions) error { log.Trace("LoopbackFS::StageData : name=%s, id=%s", options.Name, options.Id) - path := fmt.Sprintf("%s_%s", filepath.Join(lfs.path, options.Name), strings.ReplaceAll(options.Id, "/", "_")) + path := fmt.Sprintf( + "%s_%s", + filepath.Join(lfs.path, options.Name), + strings.ReplaceAll(options.Id, "/", "_"), + ) return os.WriteFile(path, options.Data, 0777) } @@ -520,7 +526,11 @@ func (lfs *LoopbackFS) CommitData(options internal.CommitDataOptions) error { } for idx, id := range options.List { - path := fmt.Sprintf("%s_%s", filepath.Join(lfs.path, options.Name), strings.ReplaceAll(id, "/", "_")) + path := fmt.Sprintf( + "%s_%s", + filepath.Join(lfs.path, options.Name), + strings.ReplaceAll(id, "/", "_"), + ) info, err := os.Lstat(path) if err == nil { block, err := os.OpenFile(path, os.O_RDONLY, os.FileMode(0666)) @@ -555,7 +565,11 @@ func (lfs *LoopbackFS) CommitData(options internal.CommitDataOptions) error { // delete the staged files for _, id := range options.List { - path := fmt.Sprintf("%s_%s", filepath.Join(lfs.path, options.Name), strings.ReplaceAll(id, "/", "_")) + path := fmt.Sprintf( + "%s_%s", + filepath.Join(lfs.path, options.Name), + strings.ReplaceAll(id, "/", "_"), + ) _ = os.Remove(path) } diff --git a/component/xload/blockpool.go b/component/xload/blockpool.go index 6ea3583520..efb53305b0 100644 --- a/component/xload/blockpool.go +++ b/component/xload/blockpool.go @@ -126,7 +126,11 @@ func (pool *BlockPool) Usage() uint32 { } func (pool *BlockPool) GetUsageDetails() (uint32, uint32, uint32, int32) { - return pool.maxBlocks, uint32(len(pool.priorityCh)), uint32(len(pool.blocksCh)), pool.waitLength.Load() + return pool.maxBlocks, uint32( + len(pool.priorityCh), + ), uint32( + len(pool.blocksCh), + ), pool.waitLength.Load() } func (pool *BlockPool) GetBlockSize() uint64 { @@ -152,8 +156,12 @@ func (pool *BlockPool) tryGet() *Block { select { case <-pool.ctx.Done(): - err := fmt.Errorf("Failed to Allocate Buffer as the process was cancelled, Len (blocksCh: %d, priorityCh: %d), MaxBlocks: %d", - len(pool.blocksCh), len(pool.priorityCh), pool.maxBlocks) + err := fmt.Errorf( + "Failed to Allocate Buffer as the process was cancelled, Len (blocksCh: %d, priorityCh: %d), MaxBlocks: %d", + len(pool.blocksCh), + len(pool.priorityCh), + pool.maxBlocks, + ) log.Debug("BlockPool::GetBlock : %v", err) return nil // getting a block from pool will be a blocking operation if the pool is empty @@ -171,8 +179,12 @@ func (pool *BlockPool) mustGet() *Block { select { case <-pool.ctx.Done(): - err := fmt.Errorf("Failed to Allocate Buffer as the process was cancelled, Len (priorityCh: %d, blockCh: %d), MaxBlocks: %d", - len(pool.priorityCh), len(pool.blocksCh), pool.maxBlocks) + err := fmt.Errorf( + "Failed to Allocate Buffer as the process was cancelled, Len (priorityCh: %d, blockCh: %d), MaxBlocks: %d", + len(pool.priorityCh), + len(pool.blocksCh), + pool.maxBlocks, + ) log.Debug("BlockPool::MustGet : %v", err) return nil case block = <-pool.priorityCh:
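The tryGet/mustGet hunks above only re-wrap long fmt.Errorf calls, but the pattern they sit in deserves a note: fetching a buffer blocks on the pool's channels, and the select on pool.ctx.Done() is what keeps a shutdown from stranding waiters forever. A simplified sketch of that shape, under assumed types (Block, and a pool holding a priority and a regular channel):

```go
package main

import (
	"context"
	"errors"
)

type Block struct{ data []byte }

// pool mirrors the rough shape of BlockPool: a small priority channel,
// a larger regular channel, and a context that aborts waiters on shutdown.
type pool struct {
	ctx        context.Context
	priorityCh chan *Block
	blocksCh   chan *Block
}

func (p *pool) get() (*Block, error) {
	// probe the priority channel first; a single select would choose
	// randomly between ready cases, so priority needs its own pass
	select {
	case b := <-p.priorityCh:
		return b, nil
	default:
	}

	// block until any buffer frees up, or bail out on cancellation
	select {
	case <-p.ctx.Done():
		return nil, errors.New("failed to allocate buffer: process was cancelled")
	case b := <-p.priorityCh:
		return b, nil
	case b := <-p.blocksCh:
		return b, nil
	}
}
```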
diff --git a/component/xload/data_manager.go b/component/xload/data_manager.go index babbb63933..c75a4509f8 100644 --- a/component/xload/data_manager.go +++ b/component/xload/data_manager.go @@ -63,11 +63,16 @@ type remoteDataManagerOptions struct { func newRemoteDataManager(opts *remoteDataManagerOptions) (*remoteDataManager, error) { if opts == nil || opts.remote == nil || opts.statsMgr == nil || opts.workerCount == 0 { - log.Err("data_manager::NewRemoteDataManager : invalid parameters sent to create remote data manager") + log.Err( + "data_manager::NewRemoteDataManager : invalid parameters sent to create remote data manager", + ) return nil, fmt.Errorf("invalid parameters sent to create remote data manager") } - log.Debug("data_manager::NewRemoteDataManager : create new remote data manager, workers %v", opts.workerCount) + log.Debug( + "data_manager::NewRemoteDataManager : create new remote data manager, workers %v", + opts.workerCount, + ) rdm := &remoteDataManager{} @@ -103,8 +108,16 @@ func (rdm *remoteDataManager) Stop() { func (rdm *remoteDataManager) Process(item *WorkItem) (int, error) { select { case <-item.Ctx.Done(): // listen for cancellation signal - log.Err("remoteDataManager::Process : Cancelling download for offset %v of %v", item.Block.Offset, item.Path) - return 0, fmt.Errorf("cancelling download for offset %v of %v", item.Block.Offset, item.Path) + log.Err( + "remoteDataManager::Process : Cancelling download for offset %v of %v", + item.Block.Offset, + item.Path, + ) + return 0, fmt.Errorf( + "cancelling download for offset %v of %v", + item.Block.Offset, + item.Path, + ) default: if item.Download { @@ -159,7 +172,12 @@ func (rdm *remoteDataManager) WriteData(item *WorkItem) (int, error) { */ // send stats to stats manager -func (rdm *remoteDataManager) sendStats(path string, isDownload bool, bytesTransferred uint64, isSuccess bool) { +func (rdm *remoteDataManager) sendStats( + path string, + isDownload bool, + bytesTransferred uint64, + isSuccess bool, +) { rdm.GetStatsManager().AddStats(&StatsItem{ Component: DATA_MANAGER, Name: path, diff --git a/component/xload/lister.go b/component/xload/lister.go index 7f56477258..3f3587b105 100644 --- a/component/xload/lister.go +++ b/component/xload/lister.go @@ -78,12 +78,18 @@ type remoteListerOptions struct { } func newRemoteLister(opts *remoteListerOptions) (*remoteLister, error) { - if opts == nil || opts.path == "" || opts.remote == nil || opts.statsMgr == nil || opts.workerCount == 0 { + if opts == nil || opts.path == "" || opts.remote == nil || opts.statsMgr == nil || + opts.workerCount == 0 { log.Err("lister::NewRemoteLister : invalid parameters sent to create remote lister") return nil, fmt.Errorf("invalid parameters sent to create remote lister") } - log.Debug("lister::NewRemoteLister : create new remote lister for %s, default permission %v, workers %v", opts.path, opts.defaultPermission, opts.workerCount) + log.Debug( + "lister::NewRemoteLister : create new remote lister for %s, default permission %v, workers %v", + opts.path, + opts.defaultPermission, + opts.workerCount, + ) rl := &remoteLister{ lister: lister{ @@ -141,10 +147,15 @@ func (rl *remoteLister) Process(item *WorkItem) (int, error) { // this block will be executed only in the first list call for the remote directory // so haven't made the listBlocked variable atomic if !rl.listBlocked { - log.Debug("remoteLister::Process : Waiting for block-list-on-mount-sec before making the list call") + log.Debug( + "remoteLister::Process : Waiting for block-list-on-mount-sec before making the list call", + ) err := waitForListTimeout() if err != nil
{ - log.Err("remoteLister::Process : unable to unmarshal block-list-on-mount-sec [%s]", err.Error()) + log.Err( + "remoteLister::Process : unable to unmarshal block-list-on-mount-sec [%s]", + err.Error(), + ) return 0, err } rl.listBlocked = true @@ -158,7 +169,11 @@ func (rl *remoteLister) Process(item *WorkItem) (int, error) { Token: marker, }) if err != nil { - log.Err("remoteLister::Process : Remote listing failed for %s [%s]", relPath, err.Error()) + log.Err( + "remoteLister::Process : Remote listing failed for %s [%s]", + relPath, + err.Error(), + ) break } @@ -175,7 +190,11 @@ func (rl *remoteLister) Process(item *WorkItem) (int, error) { }) for _, entry := range entries { - log.Debug("remoteLister::Process : Iterating: %s, Is directory: %v", entry.Path, entry.IsDir()) + log.Debug( + "remoteLister::Process : Iterating: %s, Is directory: %v", + entry.Path, + entry.IsDir(), + ) if entry.IsDir() { // create directory in local @@ -187,7 +206,10 @@ func (rl *remoteLister) Process(item *WorkItem) (int, error) { err = rl.mkdir(localPath) // TODO:: xload : handle error if err != nil { - log.Err("remoteLister::Process : Failed to create directory [%s]", err.Error()) + log.Err( + "remoteLister::Process : Failed to create directory [%s]", + err.Error(), + ) return } @@ -197,7 +219,11 @@ func (rl *remoteLister) Process(item *WorkItem) (int, error) { Path: name, }) if err != nil { - log.Err("remoteLister::Process : Failed to schedule directory listing for %s [%s]", name, err.Error()) + log.Err( + "remoteLister::Process : Failed to schedule directory listing for %s [%s]", + name, + err.Error(), + ) return } }(entry.Path)
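All the lister hunks above decorate one loop: list the remote directory page by page using a continuation token, mirror directories locally, and schedule each subdirectory as a fresh work item. A condensed sketch of the token loop, with the component interface reduced to a single assumed listPage callback (names invented for illustration):

```go
package main

import "fmt"

type objAttr struct {
	Path  string
	IsDir bool
}

// listPage is an assumed stand-in for NextComponent().StreamDir: it
// returns one page of entries plus the next continuation token.
type listPage func(path, token string) ([]objAttr, string, error)

// walkRemote drains every page for path and returns the subdirectories
// that should be scheduled as their own work items.
func walkRemote(list listPage, path string) ([]string, error) {
	var dirs []string
	token := ""
	for {
		entries, next, err := list(path, token)
		if err != nil {
			return nil, fmt.Errorf("remote listing failed for %s [%w]", path, err)
		}
		for _, e := range entries {
			if e.IsDir {
				dirs = append(dirs, e.Path)
			}
		}
		if next == "" { // empty token: listing is complete
			return dirs, nil
		}
		token = next
	}
}
```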
diff --git a/component/xload/splitter.go b/component/xload/splitter.go index 7e3fdb1571..1167b036d2 100644 --- a/component/xload/splitter.go +++ b/component/xload/splitter.go @@ -76,12 +76,20 @@ type downloadSplitterOptions struct { } func newDownloadSplitter(opts *downloadSplitterOptions) (*downloadSplitter, error) { - if opts == nil || opts.blockPool == nil || opts.path == "" || opts.remote == nil || opts.statsMgr == nil || opts.fileLocks == nil || opts.workerCount == 0 { + if opts == nil || opts.blockPool == nil || opts.path == "" || opts.remote == nil || + opts.statsMgr == nil || + opts.fileLocks == nil || + opts.workerCount == 0 { log.Err("lister::NewRemoteLister : invalid parameters sent to create download splitter") return nil, fmt.Errorf("invalid parameters sent to create download splitter") } - log.Debug("splitter::NewDownloadSplitter : create new download splitter for %s, block size %v, workers %v", opts.path, opts.blockPool.GetBlockSize(), opts.workerCount) + log.Debug( + "splitter::NewDownloadSplitter : create new download splitter for %s, block size %v, workers %v", + opts.path, + opts.blockPool.GetBlockSize(), + opts.workerCount, + ) ds := &downloadSplitter{ splitter: splitter{ @@ -122,8 +130,15 @@ func (ds *downloadSplitter) Stop() { // download data in chunks and then write to the local file func (ds *downloadSplitter) Process(item *WorkItem) (int, error) { - log.Debug("downloadSplitter::Process : Splitting data for %s, size %v, mode %v, priority %v, access time %v, modified time %v", item.Path, item.DataLen, - item.Mode, item.Priority, item.Atime.Format(time.DateTime), item.Mtime.Format(time.DateTime)) + log.Debug( + "downloadSplitter::Process : Splitting data for %s, size %v, mode %v, priority %v, access time %v, modified time %v", + item.Path, + item.DataLen, + item.Mode, + item.Priority, + item.Atime.Format(time.DateTime), + item.Mtime.Format(time.DateTime), + ) var err error localPath := filepath.Join(ds.path, item.Path) @@ -176,12 +191,20 @@ func (ds *downloadSplitter) Process(item *WorkItem) (int, error) { // truncate the file to its size err = item.FileHandle.Truncate(int64(item.DataLen)) if err != nil { - log.Err("downloadSplitter::Process : Failed to truncate file %s, so deleting it from local path [%s]", item.Path, err.Error()) + log.Err( + "downloadSplitter::Process : Failed to truncate file %s, so deleting it from local path [%s]", + item.Path, + err.Error(), + ) // delete the file which failed to truncate from the local path err1 := os.Remove(localPath) if err1 != nil { - log.Err("downloadSplitter::Process : Failed to delete file %s [%s]", item.Path, err1.Error()) + log.Err( + "downloadSplitter::Process : Failed to delete file %s [%s]", + item.Path, + err1.Error(), + ) } return -1, fmt.Errorf("failed to truncate file %s [%s]", item.Path, err.Error()) @@ -209,7 +232,10 @@ func (ds *downloadSplitter) Process(item *WorkItem) (int, error) { return case respSplitItem := <-responseChannel: if respSplitItem.Err != nil { - log.Err("downloadSplitter::Process : Failed to download data for file %s", item.Path) + log.Err( + "downloadSplitter::Process : Failed to download data for file %s", + item.Path, + ) operationSuccess = false cancel() // cancel the context to stop download of other chunks } else { @@ -277,14 +303,22 @@ func (ds *downloadSplitter) Process(item *WorkItem) (int, error) { // TODO:: xload : verify if the lmt is updated correctly err = os.Chtimes(localPath, item.Atime, item.Mtime) if err != nil { - log.Err("downloadSplitter::Process : Failed to change times of file %s [%s]", item.Path, err.Error()) + log.Err( + "downloadSplitter::Process : Failed to change times of file %s [%s]", + item.Path, + err.Error(), + ) } if ds.validateMD5 { err = ds.checkConsistency(item) if err != nil { // TODO:: xload : retry if md5 validation fails - log.Err("downloadSplitter::Process : unable to validate md5 for %s [%s]", item.Path, err.Error()) + log.Err( + "downloadSplitter::Process : unable to validate md5 for %s [%s]", + item.Path, + err.Error(), + ) operationSuccess = false } } @@ -299,18 +333,29 @@ func (ds *downloadSplitter) Process(item *WorkItem) (int, error) { }) if !operationSuccess { - log.Err("downloadSplitter::Process : Failed to download data for file %s, so deleting it from local path", item.Path) + log.Err( + "downloadSplitter::Process : Failed to download data for file %s, so deleting it from local path", + item.Path, + ) // delete the file which failed to download from the local path err = os.Remove(localPath) if err != nil { - log.Err("downloadSplitter::Process : Failed to delete file %s [%s]", item.Path, err.Error()) + log.Err( + "downloadSplitter::Process : Failed to delete file %s [%s]", + item.Path, + err.Error(), + ) } return -1, fmt.Errorf("failed to download data for file %s", item.Path) } - log.Debug("downloadSplitter::Process : Download completed for file %s, priority %v", item.Path, item.Priority) + log.Debug( + "downloadSplitter::Process : Download completed for file %s, priority %v", + item.Path, + item.Priority, + ) return 0, nil }
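The splitter hunks are again mostly call-site rewraps, but the control flow they annotate is worth spelling out: the local file is truncated to its final size up front, chunks are fanned out to workers, results are collected on a response channel, and any failure cancels the remaining chunks and removes the partial file. A compressed sketch under an assumed fetchChunk callback (the real code routes chunks through the thread pool and block pool):

```go
package main

import (
	"context"
	"fmt"
	"os"
)

// fetchChunk is an assumed callback that downloads [offset, offset+size)
// into the already-truncated local file.
type fetchChunk func(ctx context.Context, offset, size int64) error

func downloadFile(path string, dataLen, blockSize int64, fetch fetchChunk) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// reserve the final size up front so chunks can land in any order
	if err := f.Truncate(dataLen); err != nil {
		_ = os.Remove(path) // do not leave a partial file behind
		return fmt.Errorf("failed to truncate file %s [%s]", path, err.Error())
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	results := make(chan error)
	chunks := 0
	for off := int64(0); off < dataLen; off += blockSize {
		chunks++
		size := min(blockSize, dataLen-off) // clamp the final chunk
		go func(off, size int64) { results <- fetch(ctx, off, size) }(off, size)
	}

	ok := true
	for i := 0; i < chunks; i++ {
		if err := <-results; err != nil {
			ok = false
			cancel() // stop the chunks still in flight
		}
	}
	if !ok {
		_ = os.Remove(path)
		return fmt.Errorf("failed to download data for file %s", path)
	}
	return nil
}
```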
diff --git a/component/xload/stats_manager.go b/component/xload/stats_manager.go index 10277ed0b6..bc8ab337ac 100644 --- a/component/xload/stats_manager.go +++ b/component/xload/stats_manager.go @@ -96,11 +96,17 @@ func NewStatsManager(count uint32, isExportEnabled bool, pool *BlockPool) (*Stat var err error if isExportEnabled { pid := fmt.Sprintf("%v", os.Getpid()) - path := common.ExpandPath(filepath.Join(common.DefaultWorkDir, strings.ReplaceAll(JSON_FILE_NAME, "{PID}", pid))) + path := common.ExpandPath( + filepath.Join(common.DefaultWorkDir, strings.ReplaceAll(JSON_FILE_NAME, "{PID}", pid)), + ) log.Crit("statsManager::NewStatsManager : creating json file %v", path) fh, err = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) if err != nil { - log.Err("statsManager::NewStatsManager : failed to create json file %v [%v]", path, err.Error()) + log.Err( + "statsManager::NewStatsManager : failed to create json file %v [%v]", + path, + err.Error(), + ) return nil, err } } @@ -116,7 +122,10 @@ func NewStatsManager(count uint32, isExportEnabled bool, pool *BlockPool) (*Stat func (sm *StatsManager) Start() { sm.waitGroup.Add(1) sm.startTime = time.Now().UTC() - log.Debug("statsManager::start : start stats manager at time %v", sm.startTime.Format(time.RFC1123)) + log.Debug( + "statsManager::start : start stats manager at time %v", + sm.startTime.Format(time.RFC1123), + ) _ = sm.writeToJSON([]byte("[\n"), false) _ = sm.marshalStatsData(&statsJSONData{Timestamp: sm.startTime.Format(time.RFC1123)}, false) _ = sm.writeToJSON([]byte("\n]"), false) @@ -129,7 +138,9 @@ func (sm *StatsManager) Stop() { log.Debug("statsManager::stop : stop stats manager") sm.done <- true // close the stats exporter thread - close(sm.done) // TODO::xload : check if closing the done channel here will lead to closing the stats exporter thread + close( + sm.done, + ) // TODO::xload : check if closing the done channel here will lead to closing the stats exporter thread close(sm.items) sm.waitGroup.Wait() @@ -212,7 +223,10 @@ func (sm *StatsManager) statsExporter() { func (sm *StatsManager) calculateBandwidth() { if sm.totalFiles == 0 { - log.Debug("statsManager::calculateBandwidth : skipping as total files listed so far is %v", sm.totalFiles) + log.Debug( + "statsManager::calculateBandwidth : skipping as total files listed so far is %v", + sm.totalFiles, + ) return } @@ -225,12 +239,12 @@ func (sm *StatsManager) calculateBandwidth() { bandwidthMbps := float64(bytesTransferred*8) / (timeLapsed * float64(MB)) diskSpeedMbps := float64(sm.diskIOBytes*8) / (timeLapsed * float64(MB)) - var max, pr, reg uint32 + var maxUsage, pr, reg uint32 var waiting int32 var poolusage uint32 if sm.pool != nil { - max, pr, reg, waiting = sm.pool.GetUsageDetails() + maxUsage, pr, reg, waiting = sm.pool.GetUsageDetails() sm.pool.Usage() } @@ -238,7 +252,7 @@ func (sm *StatsManager) calculateBandwidth() { "%v Pending, %v Total, Bytes transferred %v, Throughput (Mbps): %.2f, Disk Speed (Mbps): %.2f, Blockpool usage: %v%%, (%v / %v / %v : %v), Time: %.2f", currTime.Format(time.RFC1123), percentCompleted, sm.success, sm.failed, filesPending, sm.totalFiles, bytesTransferred, bandwidthMbps, diskSpeedMbps, poolusage, - max, pr, reg, waiting, timeLapsed) + maxUsage, pr, reg, waiting, timeLapsed) if sm.fileHandle != nil { err := sm.marshalStatsData(&statsJSONData{ @@ -252,7 +266,10 @@ BandwidthMbps: RoundFloat(bandwidthMbps, 2), }, true) if err != nil { - log.Err("statsManager::calculateBandwidth : failed to write to json file [%v]", err.Error()) + log.Err( + "statsManager::calculateBandwidth : failed to write to json file [%v]", + err.Error(), + ) } }
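The `var max, pr, reg uint32` rename above is the predeclared linter at work rather than cosmetics: max has been a predeclared Go function since Go 1.21, and a variable of that name shadows it for the rest of the scope, so any later max(...) call there would fail to compile. A tiny illustration:

```go
package main

import "fmt"

func main() {
	fmt.Println(max(10, 20)) // built-in max: prints 20

	maxUsage := uint32(75) // renamed local: the built-in stays reachable
	fmt.Println(maxUsage)

	// had this variable been named `max`, every max(...) call below the
	// declaration in this scope would be a compile error, because the
	// identifier would then name a uint32, not a function
}
```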
diff --git a/component/xload/threadpool.go b/component/xload/threadpool.go index f7376d5d20..45bce51979 100644 --- a/component/xload/threadpool.go +++ b/component/xload/threadpool.go @@ -102,7 +102,10 @@ func (threadPool *ThreadPool) Schedule(item *WorkItem) error { // true means high priority and false means low priority select { case <-threadPool.ctx.Done(): - log.Err("ThreadPool::Schedule : Thread pool is closed, cannot schedule workitem %s", item.Path) + log.Err( + "ThreadPool::Schedule : Thread pool is closed, cannot schedule workitem %s", + item.Path, + ) return fmt.Errorf("thread pool is closed, cannot schedule workitem %s", item.Path) default: if item.Priority { @@ -157,7 +160,12 @@ func (threadPool *ThreadPool) process(item *WorkItem) { dataLength, err := threadPool.callback(item) if err != nil { // TODO:: xload : add retry logic - log.Err("ThreadPool::Do : Error in %s processing workitem %s : %v", item.CompName, item.Path, err.Error()) + log.Err( + "ThreadPool::Do : Error in %s processing workitem %s : %v", + item.CompName, + item.Path, + err.Error(), + ) } // add this error in response channel diff --git a/component/xload/utils.go b/component/xload/utils.go index 9682149d4e..71a261aaf5 100644 --- a/component/xload/utils.go +++ b/component/xload/utils.go @@ -116,7 +116,11 @@ func RoundFloat(val float64, precision int) float64 { func isFilePresent(localPath string) (bool, bool, int64) { fileInfo, err := os.Stat(localPath) if err != nil { - log.Debug("utils::isFilePresent : %s is not present in local path [%v]", localPath, err.Error()) + log.Debug( + "utils::isFilePresent : %s is not present in local path [%v]", + localPath, + err.Error(), + ) return false, false, 0 } else { return true, fileInfo.IsDir(), fileInfo.Size() diff --git a/component/xload/xload.go b/component/xload/xload.go index 538653c58d..187b7f0eb9 100644 --- a/component/xload/xload.go +++ b/component/xload/xload.go @@ -71,14 +71,14 @@ type Xload struct { // Structure defining your config parameters type XloadOptions struct { - BlockSize float64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` - Mode string `config:"mode" yaml:"mode,omitempty"` - Path string `config:"path" yaml:"path,omitempty"` - ExportProgress bool `config:"export-progress" yaml:"path,omitempty"` - ValidateMD5 bool `config:"validate-md5" yaml:"validate-md5,omitempty"` + BlockSize float64 `config:"block-size-mb" yaml:"block-size-mb,omitempty"` + Mode string `config:"mode" yaml:"mode,omitempty"` + Path string `config:"path" yaml:"path,omitempty"` + ExportProgress bool `config:"export-progress" yaml:"path,omitempty"` + ValidateMD5 bool `config:"validate-md5" yaml:"validate-md5,omitempty"` CleanupOnStart bool `config:"cleanup-on-start" yaml:"cleanup-on-start,omitempty"` - Workers int32 `config:"workers" yaml:"workers,omitempty"` - PoolSize uint32 `config:"pool-size" yaml:"pool-size,omitempty"` + Workers int32 `config:"workers" yaml:"workers,omitempty"` + PoolSize uint32 `config:"pool-size" yaml:"pool-size,omitempty"` // TODO:: xload : add parallelism parameter } @@ -185,7 +185,7 @@ func (xl *Xload) Configure(_ bool) error { } } - var mode Mode = EMode.PRELOAD() // using preload as the default mode + var mode = EMode.PRELOAD() // using preload as the default mode if len(conf.Mode) > 0 { err = mode.Parse(conf.Mode) if err != nil { @@ -228,8 +228,15 @@ func (xl *Xload) Configure(_ bool) error { xl.poolctx, xl.poolCancelFunc = context.WithCancel(context.Background()) - log.Crit("Xload::Configure : block size %v, mode %v, path %v, default permission %v, export progress %v, validate md5 %v", xl.blockSize, - xl.mode.String(), xl.path, xl.defaultPermission, xl.exportProgress, xl.validateMD5) + log.Crit( + "Xload::Configure : block size %v,
mode %v, path %v, default permission %v, export progress %v, validate md5 %v", + xl.blockSize, + xl.mode.String(), + xl.path, + xl.defaultPermission, + xl.exportProgress, + xl.validateMD5, + ) return nil } @@ -420,7 +427,11 @@ func (xl *Xload) downloadFile(fileName string) error { // create the local path where the file will be downloaded err = os.MkdirAll(filepath.Dir(filepath.Join(xl.path, fileName)), xl.defaultPermission) if err != nil { - log.Err("Xload::downloadFile : Failed to create local directory for %s [%s]", fileName, err.Error()) + log.Err( + "Xload::downloadFile : Failed to create local directory for %s [%s]", + fileName, + err.Error(), + ) return err } @@ -445,7 +456,12 @@ // OpenFile: Download the file if not already downloaded and return the file handle func (xl *Xload) OpenFile(options internal.OpenFileOptions) (*handlemap.Handle, error) { - log.Trace("Xload::OpenFile : name=%s, flags=%d, mode=%s", options.Name, options.Flags, options.Mode) + log.Trace( + "Xload::OpenFile : name=%s, flags=%d, mode=%s", + options.Name, + options.Flags, + options.Mode, + ) localPath := filepath.Join(xl.path, options.Name) flock := xl.fileLocks.Get(options.Name) @@ -515,9 +531,17 @@ func NewXloadComponent() internal.Component { func init() { internal.AddComponent(compName, NewXloadComponent) - workers := config.AddInt32Flag("workers", 100, "number of workers to execute parallel download during preload") + workers := config.AddInt32Flag( + "workers", + 100, + "number of workers to execute parallel download during preload", + ) config.BindPFlag(compName+".workers", workers) - poolSize := config.AddInt32Flag("pool-size", 300, "number of blocks in the blockpool for preload") + poolSize := config.AddInt32Flag( + "pool-size", + 300, + "number of blocks in the blockpool for preload", + ) config.BindPFlag(compName+".pool-size", poolSize) } diff --git a/internal/base_component.go b/internal/base_component.go index ea0c90b1c3..3d3efd46c8 100644 --- a/internal/base_component.go +++ b/internal/base_component.go @@ -286,7 +286,9 @@ func (base *BaseComponent) GetAttr(options GetAttrOptions) (*ObjAttr, error) { return &ObjAttr{}, nil } -func (base *BaseComponent) GetFileBlockOffsets(options GetFileBlockOffsetsOptions) (*common.BlockOffsetList, error) { +func (base *BaseComponent) GetFileBlockOffsets( + options GetFileBlockOffsetsOptions, +) (*common.BlockOffsetList, error) { if base.next != nil { return base.next.GetFileBlockOffsets(options) } diff --git a/internal/pipeline.go b/internal/pipeline.go index 1676b8e601..e5f8565171 100644 --- a/internal/pipeline.go +++ b/internal/pipeline.go @@ -83,9 +83,15 @@ func NewPipeline(components []string, isParent bool) (*Pipeline, error) { return nil, err } - if !(comp.Priority() <= lastPriority) { - log.Err("Pipeline::NewPipeline : Invalid Component order [priority of %s higher than above components]", comp.Name()) - return nil, fmt.Errorf("config error in Pipeline [component %s is out of order]", name) + if comp.Priority() > lastPriority { + log.Err( + "Pipeline::NewPipeline : Invalid Component order [priority of %s higher than above components]", + comp.Name(), + ) + return nil, fmt.Errorf( + "config error in Pipeline [component %s is out of order]", + name, + ) } else { lastPriority = comp.Priority() }
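The pipeline.go hunk pairs a reflow with a small logic cleanup: for ordered values, `!(comp.Priority() <= lastPriority)` is exactly `comp.Priority() > lastPriority`, and the positive form states the invariant directly (components must appear in non-increasing priority). A sketch of that validation on plain ints (the priorities here are invented):

```go
package main

import (
	"fmt"
	"math"
)

// validateOrder mirrors the NewPipeline check: priorities must never
// increase as the component list is traversed.
func validateOrder(priorities []int) error {
	last := math.MaxInt // first component always passes
	for i, p := range priorities {
		if p > last { // equivalent to !(p <= last), but reads as the rule
			return fmt.Errorf("component %d is out of order", i)
		}
		last = p
	}
	return nil
}

func main() {
	fmt.Println(validateOrder([]int{9, 7, 7, 3})) // <nil>
	fmt.Println(validateOrder([]int{9, 7, 8}))    // component 2 is out of order
}
```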
diff --git a/internal/stats_manager/stats_manager.go b/internal/stats_manager/stats_manager.go index 7194438b1d..60e0f9ab65 100644 --- a/internal/stats_manager/stats_manager.go +++ b/internal/stats_manager/stats_manager.go @@ -232,7 +232,7 @@ func (sc *StatsCollector) statsDumper() { // log.Debug("stats_manager::statsDumper : stats: %v", string(msg)) stMgrOpt.transferMtx.Lock() - _, err = f.WriteString(fmt.Sprintf("%v\n", string(msg))) + _, err = fmt.Fprintf(f, "%v\n", string(msg)) stMgrOpt.transferMtx.Unlock() if err != nil { log.Err("stats_manager::statsDumper : Unable to write to pipe [%v]", err) @@ -340,7 +340,10 @@ func statsPolling() { } if cmpSt.Timestamp == stMgrOpt.cmpTimeMap[cmpSt.ComponentName] { - log.Debug("stats_manager::statsPolling : Skipping as there is no change in stats collected for %v", cmpSt.ComponentName) + log.Debug( + "stats_manager::statsPolling : Skipping as there is no change in stats collected for %v", + cmpSt.ComponentName, + ) continue } @@ -354,7 +357,7 @@ func statsPolling() { // send the stats collected so far to transfer pipe stMgrOpt.transferMtx.Lock() - _, err = tf.WriteString(fmt.Sprintf("%v\n", string(msg))) + _, err = fmt.Fprintf(tf, "%v\n", string(msg)) stMgrOpt.transferMtx.Unlock() if err != nil { log.Err("stats_manager::statsDumper : Unable to write to pipe [%v]", err) diff --git a/main.go b/main.go index 7fdbddbb0a..bd4c5bc1d8 100644 --- a/main.go +++ b/main.go @@ -36,7 +36,6 @@ package main import ( "github.com/Azure/azure-storage-fuse/v2/cmd" "github.com/Azure/azure-storage-fuse/v2/common/log" - _ "github.com/Azure/azure-storage-fuse/v2/common/log" ) //go:generate ./cmd/componentGenerator.sh $NAME
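Two distinct cleanups above: main.go drops a blank import of a package that is already imported normally on the previous line, and the stats dumper swaps f.WriteString(fmt.Sprintf(...)) for fmt.Fprintf(f, ...), the staticcheck-suggested form that formats straight into the writer instead of building an intermediate string. A small before/after sketch:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "stats")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	msg := `{"value": 42}`

	// before: allocates the formatted string, then copies it into f
	_, _ = f.WriteString(fmt.Sprintf("%v\n", msg))

	// after: identical output, formatted directly into the writer
	_, _ = fmt.Fprintf(f, "%v\n", msg)
}
```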
diff --git a/tools/health-monitor/internal/stats_export.go b/tools/health-monitor/internal/stats_export.go index e229950821..534c2954fe 100644 --- a/tools/health-monitor/internal/stats_export.go +++ b/tools/health-monitor/internal/stats_export.go @@ -174,15 +174,19 @@ func (se *StatsExporter) StatsExporter() { } func (se *StatsExporter) addToList(st *ExportedStat, idx int) { - if st.MonitorName == hmcommon.BlobfuseStats { + switch st.MonitorName { + case hmcommon.BlobfuseStats: se.outputList[idx].Bfs = append(se.outputList[idx].Bfs, st.Stat.(stats_manager.PipeMsg)) - } else if st.MonitorName == hmcommon.FileCacheMon { - se.outputList[idx].FcEvent = append(se.outputList[idx].FcEvent, st.Stat.(*hmcommon.CacheEvent)) - } else if st.MonitorName == hmcommon.CpuProfiler { + case hmcommon.FileCacheMon: + se.outputList[idx].FcEvent = append( + se.outputList[idx].FcEvent, + st.Stat.(*hmcommon.CacheEvent), + ) + case hmcommon.CpuProfiler: se.outputList[idx].Cpu = st.Stat.(string) - } else if st.MonitorName == hmcommon.MemoryProfiler { + case hmcommon.MemoryProfiler: se.outputList[idx].Mem = st.Stat.(string) - } else if st.MonitorName == hmcommon.NetworkProfiler { + case hmcommon.NetworkProfiler: se.outputList[idx].Net = st.Stat.(string) } } @@ -265,12 +269,24 @@ func (se *StatsExporter) getNewFile() error { baseName := filepath.Join(hmcommon.OutputPath, hmcommon.OutputFileName) // Remove the oldest file - fname = fmt.Sprintf("%v_%v_%v.%v", baseName, hmcommon.Pid, (hmcommon.OutputFileCount - 1), hmcommon.OutputFileExtension) + fname = fmt.Sprintf( + "%v_%v_%v.%v", + baseName, + hmcommon.Pid, + (hmcommon.OutputFileCount - 1), + hmcommon.OutputFileExtension, + ) _ = os.Remove(fname) for i := hmcommon.OutputFileCount - 2; i > 0; i-- { fname = fmt.Sprintf("%v_%v_%v.%v", baseName, hmcommon.Pid, i, hmcommon.OutputFileExtension) - fnameNew = fmt.Sprintf("%v_%v_%v.%v", baseName, hmcommon.Pid, (i + 1), hmcommon.OutputFileExtension) + fnameNew = fmt.Sprintf( + "%v_%v_%v.%v", + baseName, + hmcommon.Pid, + (i + 1), + hmcommon.OutputFileExtension, + ) // Move each file to next number 8 -> 9, 7 -> 8, 6 -> 7 ... _ = os.Rename(fname, fnameNew) @@ -300,7 +316,10 @@ func (se *StatsExporter) getNewFile() error { func CloseExporter() error { se, err := NewStatsExporter() if err != nil || se == nil { - log.Err("stats_exporter::CloseExporter : Error in creating stats exporter instance [%v]", err) + log.Err( + "stats_exporter::CloseExporter : Error in creating stats exporter instance [%v]", + err, + ) return err } diff --git a/tools/health-monitor/main.go b/tools/health-monitor/main.go index 8c7b4685e2..a1ad74bf16 100644 --- a/tools/health-monitor/main.go +++ b/tools/health-monitor/main.go @@ -96,7 +96,9 @@ func main() { if len(strings.TrimSpace(hmcommon.Pid)) == 0 { fmt.Printf("pid of blobfuse2 process not provided\n") log.Err("main::main : pid of blobfuse2 process not provided") - time.Sleep(1 * time.Second) // adding 1 second wait for adding to log(base type) before exiting + time.Sleep( + 1 * time.Second, + ) // adding 1 second wait for adding to log(base type) before exiting os.Exit(1) } @@ -113,16 +115,24 @@ func main() { common.TransferPipe += "_" + hmcommon.Pid common.PollingPipe += "_" + hmcommon.Pid - log.Debug("Blobfuse2 Pid: %v \n"+ - "Transfer Pipe: %v \n"+ - "Polling Pipe: %v \n"+ - "Blobfuse2 Stats poll interval: %v \n"+ - "Health Stats poll interval: %v \n"+ - "Cache Path: %v \n"+ - "Max cache size in MB: %v \n", + log.Debug( + "Blobfuse2 Pid: %v \n"+ + "Transfer Pipe: %v \n"+ + "Polling Pipe: %v \n"+ + "Blobfuse2 Stats poll interval: %v \n"+ + "Health Stats poll interval: %v \n"+ + "Cache Path: %v \n"+ + "Max cache size in MB: %v \n", "Output path: %v", - hmcommon.Pid, common.TransferPipe, common.PollingPipe, hmcommon.BfsPollInterval, - hmcommon.ProcMonInterval, hmcommon.TempCachePath, hmcommon.MaxCacheSize, hmcommon.OutputPath) + hmcommon.Pid, + common.TransferPipe, + common.PollingPipe, + hmcommon.BfsPollInterval, + hmcommon.ProcMonInterval, + hmcommon.TempCachePath, + hmcommon.MaxCacheSize, + hmcommon.OutputPath, + ) comps := getMonitors() @@ -145,18 +155,63 @@ func main() { func init() { flag.StringVar(&hmcommon.Pid, "pid", "", "Pid of blobfuse2 process") - flag.IntVar(&hmcommon.BfsPollInterval, "stats-poll-interval-sec", 10, "Blobfuse2 stats polling interval in seconds") - flag.IntVar(&hmcommon.ProcMonInterval, "process-monitor-interval-sec", 30, "CPU, memory and network usage polling interval in seconds") - flag.StringVar(&hmcommon.OutputPath, "output-path", "", "Path where output files will be created") + flag.IntVar( + &hmcommon.BfsPollInterval, + "stats-poll-interval-sec", + 10, + "Blobfuse2 stats polling interval in seconds", + ) + flag.IntVar( + &hmcommon.ProcMonInterval, + "process-monitor-interval-sec", + 30, + "CPU, memory and network usage polling interval in seconds", + ) + flag.StringVar( + &hmcommon.OutputPath, + "output-path", + "", + "Path where output files will be created", + ) flag.BoolVar(&hmcommon.NoBfsMon, "no-blobfuse2-stats", false, "Disable blobfuse2 stats polling") - flag.BoolVar(&hmcommon.NoCpuProf, "no-cpu-profiler", false, "Disable CPU monitoring on blobfuse2 process") - flag.BoolVar(&hmcommon.NoMemProf, "no-memory-profiler", false, "Disable memory monitoring on blobfuse2 process") - flag.BoolVar(&hmcommon.NoNetProf, "no-network-profiler", false, "Disable network monitoring on blobfuse2 process") - flag.BoolVar(&hmcommon.NoFileCacheMon, "no-file-cache-monitor", false, "Disable file cache directory monitor") + flag.BoolVar( + &hmcommon.NoCpuProf, + "no-cpu-profiler", + false, + "Disable CPU
monitoring on blobfuse2 process", + ) + flag.BoolVar( + &hmcommon.NoMemProf, + "no-memory-profiler", + false, + "Disable memory monitoring on blobfuse2 process", + ) + flag.BoolVar( + &hmcommon.NoNetProf, + "no-network-profiler", + false, + "Disable network monitoring on blobfuse2 process", + ) + flag.BoolVar( + &hmcommon.NoFileCacheMon, + "no-file-cache-monitor", + false, + "Disable file cache directory monitor", + ) flag.StringVar(&hmcommon.TempCachePath, "cache-path", "", "path to local disk cache") - flag.Float64Var(&hmcommon.MaxCacheSize, "max-size-mb", 0, "maximum cache size allowed. Default - 0 (unlimited)") - - flag.BoolVar(&hmcommon.CheckVersion, "version", false, "Print the current version of health-monitor") + flag.Float64Var( + &hmcommon.MaxCacheSize, + "max-size-mb", + 0, + "maximum cache size allowed. Default - 0 (unlimited)", + ) + + flag.BoolVar( + &hmcommon.CheckVersion, + "version", + false, + "Print the current version of health-monitor", + ) } diff --git a/tools/health-monitor/monitor/blobfuse_stats/stats_reader.go b/tools/health-monitor/monitor/blobfuse_stats/stats_reader.go index 3f9c9730c7..470b4c77de 100644 --- a/tools/health-monitor/monitor/blobfuse_stats/stats_reader.go +++ b/tools/health-monitor/monitor/blobfuse_stats/stats_reader.go @@ -156,7 +156,7 @@ func (bfs *BlobfuseStats) statsPoll() { defer ticker.Stop() for t := range ticker.C { - _, err = pf.WriteString(fmt.Sprintf("Poll at %v\n", t.Format(time.RFC3339))) + _, err = fmt.Fprintf(pf, "Poll at %v\n", t.Format(time.RFC3339)) if err != nil { log.Err("StatsReader::statsPoll : [%v]", err) break diff --git a/tools/health-monitor/monitor/cpu_mem_profiler/cpu_mem_monitor.go b/tools/health-monitor/monitor/cpu_mem_profiler/cpu_mem_monitor.go index 1980b9c562..da1a884174 100644 --- a/tools/health-monitor/monitor/cpu_mem_profiler/cpu_mem_monitor.go +++ b/tools/health-monitor/monitor/cpu_mem_profiler/cpu_mem_monitor.go @@ -91,7 +91,10 @@ func (cm *CpuMemProfiler) Monitor() error { func (cm *CpuMemProfiler) ExportStats(timestamp string, st any) { se, err := hminternal.NewStatsExporter() if err != nil || se == nil { - log.Err("cpu_mem_monitor::ExportStats : Error in creating stats exporter instance [%v]", err) + log.Err( + "cpu_mem_monitor::ExportStats : Error in creating stats exporter instance [%v]", + err, + ) return } @@ -120,7 +123,11 @@ func (cm *CpuMemProfiler) getCpuMemoryUsage() (*hmcommon.CpuMemStat, error) { cliOut, err := exec.Command("bash", "-c", topCmd).Output() if err != nil { - log.Err("cpu_mem_monitor::getCpuMemoryUsage : Blobfuse2 is not running on pid %v [%v]", cm.pid, err) + log.Err( + "cpu_mem_monitor::getCpuMemoryUsage : Blobfuse2 is not running on pid %v [%v]", + cm.pid, + err, + ) return nil, err } @@ -132,7 +139,10 @@ func (cm *CpuMemProfiler) getCpuMemoryUsage() (*hmcommon.CpuMemStat, error) { cpuIndex, memIndex := getCpuMemIndex(processes[0]) stats := strings.Fields(processes[1]) - if cpuIndex == -1 || memIndex == -1 || len(stats) <= int(math.Max(float64(cpuIndex), float64(memIndex))) || len(stats[cpuIndex]) == 0 || len(stats[memIndex]) == 0 { + if cpuIndex == -1 || memIndex == -1 || + len(stats) <= int(math.Max(float64(cpuIndex), float64(memIndex))) || + len(stats[cpuIndex]) == 0 || + len(stats[memIndex]) == 0 { log.Debug("cpu_mem_monitor::getCpuMemoryUsage : %v", processes) log.Err("cpu_mem_monitor::getCpuMemoryUsage : Blobfuse2 is not running on pid %v", cm.pid) return nil, fmt.Errorf("blobfuse2 is not running on pid %v", cm.pid) @@ -143,7 +153,8 @@ func (cm *CpuMemProfiler)
getCpuMemoryUsage() (*hmcommon.CpuMemStat, error) { MemUsage: stats[memIndex], } cpuMemStat.CpuUsage += "%" - if cpuMemStat.MemUsage[len(cpuMemStat.MemUsage)-1] >= '0' && cpuMemStat.MemUsage[len(cpuMemStat.MemUsage)-1] <= '9' { + if cpuMemStat.MemUsage[len(cpuMemStat.MemUsage)-1] >= '0' && + cpuMemStat.MemUsage[len(cpuMemStat.MemUsage)-1] <= '9' { cpuMemStat.MemUsage += "k" } @@ -152,11 +163,12 @@ func (cm *CpuMemProfiler) getCpuMemoryUsage() (*hmcommon.CpuMemStat, error) { func getCpuMemIndex(process string) (int, int) { cols := strings.Fields(process) - var cpuIndex, memIndex int = -1, -1 + var cpuIndex, memIndex = -1, -1 for i, col := range cols { - if col == "%CPU" { + switch col { + case "%CPU": cpuIndex = i - } else if col == "VIRT" { + case "VIRT": memIndex = i } } diff --git a/tools/health-monitor/monitor/file_cache/cache_monitor.go b/tools/health-monitor/monitor/file_cache/cache_monitor.go index 109dec9693..05538a2540 100644 --- a/tools/health-monitor/monitor/file_cache/cache_monitor.go +++ b/tools/health-monitor/monitor/file_cache/cache_monitor.go @@ -185,7 +185,9 @@ func (fc *FileCache) createEvent(event *watcher.Event) { delete(fc.cacheObj.fileRemovedMap, event.Path) fc.cacheObj.fileCreatedMap[event.Path] = event.Size() fc.cacheObj.cacheSize += event.Size() - fc.cacheObj.cacheConsumed = (float64)(fc.cacheObj.cacheSize*100) / (fc.maxSizeMB * common.MbToBytes) + fc.cacheObj.cacheConsumed = (float64)( + fc.cacheObj.cacheSize*100, + ) / (fc.maxSizeMB * common.MbToBytes) } e := fc.getCacheEventObj(event) @@ -201,7 +203,9 @@ func (fc *FileCache) removeEvent(event *watcher.Event) { delete(fc.cacheObj.fileCreatedMap, event.Path) fc.cacheObj.fileRemovedMap[event.Path] = event.Size() fc.cacheObj.cacheSize = int64(math.Max(0, float64(fc.cacheObj.cacheSize-event.Size()))) - fc.cacheObj.cacheConsumed = (float64)(fc.cacheObj.cacheSize*100) / (fc.maxSizeMB * common.MbToBytes) + fc.cacheObj.cacheConsumed = (float64)( + fc.cacheObj.cacheSize*100, + ) / (fc.maxSizeMB * common.MbToBytes) } e := fc.getCacheEventObj(event) @@ -220,7 +224,9 @@ func (fc *FileCache) chmodEvent(event *watcher.Event) { if fileSize != event.Size() { fc.cacheObj.cacheSize += event.Size() - fileSize fc.cacheObj.fileCreatedMap[event.Path] = event.Size() - fc.cacheObj.cacheConsumed = (float64)(fc.cacheObj.cacheSize*100) / (fc.maxSizeMB * common.MbToBytes) + fc.cacheObj.cacheConsumed = (float64)( + fc.cacheObj.cacheSize*100, + ) / (fc.maxSizeMB * common.MbToBytes) } } @@ -241,7 +247,9 @@ func (fc *FileCache) writeEvent(event *watcher.Event) { fc.cacheObj.cacheSize += event.Size() - fileSize fc.cacheObj.fileCreatedMap[event.Path] = event.Size() - fc.cacheObj.cacheConsumed = (float64)(fc.cacheObj.cacheSize*100) / (fc.maxSizeMB * common.MbToBytes) + fc.cacheObj.cacheConsumed = (float64)( + fc.cacheObj.cacheSize*100, + ) / (fc.maxSizeMB * common.MbToBytes) } else { return }
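The four cache_monitor hunks rewrap one recurring expression: cache consumption as a percentage, bytes held times 100 divided by the MB limit converted to bytes. The cast wraps the numerator, so the division happens in float64 and keeps its fractional part. A worked sketch (mbToBytes is assumed to match common.MbToBytes):

```go
package main

import "fmt"

const mbToBytes = 1024 * 1024 // assumed to match common.MbToBytes

// cacheConsumedPct mirrors the reformatted expression: an integer byte
// count scaled to a float percentage of the configured MB limit.
func cacheConsumedPct(cacheSize int64, maxSizeMB float64) float64 {
	return float64(cacheSize*100) / (maxSizeMB * mbToBytes)
}

func main() {
	// 256 MiB held against a 1024 MB limit -> 25.00%
	fmt.Printf("%.2f%%\n", cacheConsumedPct(256*mbToBytes, 1024))
}
```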
diff --git a/tools/health-monitor/monitor/network_profiler/network_monitor.go b/tools/health-monitor/monitor/network_profiler/network_monitor.go index c28e99070d..e4d284a05e 100644 --- a/tools/health-monitor/monitor/network_profiler/network_monitor.go +++ b/tools/health-monitor/monitor/network_profiler/network_monitor.go @@ -68,7 +68,10 @@ func (nw *NetworkProfiler) Monitor() error { func (nw *NetworkProfiler) ExportStats(timestamp string, st any) { se, err := hminternal.NewStatsExporter() if err != nil || se == nil { - log.Err("network_monitor::ExportStats : Error in creating stats exporter instance [%v]", err) + log.Err( + "network_monitor::ExportStats : Error in creating stats exporter instance [%v]", + err, + ) return }
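Every ExportStats implementation in the health monitor opens with the same guard seen in this final hunk: fetch the exporter singleton and bail out with a log line if it could not be created. A minimal sketch of that accessor shape (the names and the sync.Once construction are assumptions, not the monitor's actual implementation):

```go
package main

import (
	"fmt"
	"sync"
)

type statsExporter struct{}

var (
	exporter *statsExporter
	once     sync.Once
)

// newStatsExporter lazily creates a process-wide exporter; callers
// nil/err-check the result exactly like the hunks above do.
func newStatsExporter() (*statsExporter, error) {
	once.Do(func() { exporter = &statsExporter{} })
	if exporter == nil {
		return nil, fmt.Errorf("stats exporter not created")
	}
	return exporter, nil
}

func main() {
	se, err := newStatsExporter()
	if err != nil || se == nil {
		fmt.Printf("error in creating stats exporter instance [%v]\n", err)
		return
	}
	fmt.Println("exporter ready")
}
```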