mirror of https://github.com/fleetdm/fleet (synced 2026-04-21 13:37:30 +00:00)
slog migration: initLogger + serve.go + cron + schedule (#40699)
**Related issue:** Resolves #40540

Almost done with slog migration.

# Checklist for submitter

- [ ] Changes file added for user-visible changes in `changes/`, `orbit/changes/` or `ee/fleetd-chrome/changes`.
  - Changes present in previous PR

## Testing

- [x] Added/updated automated tests
- [x] QA'd all new/changed functionality manually

## Summary by CodeRabbit

* **Chores**
  * Updated internal logging infrastructure to use Go's standard logging library, modernizing the logging system while maintaining existing functionality and error handling behavior.
This commit is contained in:
parent 6543d97f06
commit bf9180e6e3

15 changed files with 302 additions and 313 deletions
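Note: the change follows one mechanical pattern throughout. Calls through Fleet's `logging.Logger` wrapper (go-kit style `level.Info(logger).Log("msg", ...)` and the `logger.SlogLogger()` bridge) become direct `*slog.Logger` calls. A minimal runnable sketch of the before and after call shapes; this is not code from the PR, and the handler and attribute values are illustrative:

```go
package main

import (
	"context"
	"errors"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	// Handler choice is illustrative; Fleet builds its handler in initLogger (below).
	logger := slog.New(slog.NewJSONHandler(os.Stderr, nil))

	// Before (go-kit style, removed by this PR):
	//   level.Info(logger).Log("msg", "sentry initialized", "dsn", dsn)
	// After (slog): the message is a positional argument and the context is
	// passed to the handler, which lets context-aware handlers correlate records.
	logger.InfoContext(ctx, "sentry initialized", "dsn", "https://example.invalid")

	// Scoped loggers: logger.SlogLogger().With(...) collapses to logger.With(...).
	cron := logger.With("cron", "vulnerabilities")
	cron.ErrorContext(ctx, "syncing vulnerability database", "err", errors.New("feed unreachable"))
}
```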
@@ -31,7 +31,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mdm/assets"
 	maintained_apps "github.com/fleetdm/fleet/v4/server/mdm/maintainedapps"
 	"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"github.com/fleetdm/fleet/v4/server/policies"
 	"github.com/fleetdm/fleet/v4/server/service"
 	"github.com/fleetdm/fleet/v4/server/service/externalsvc"
@@ -49,7 +48,7 @@ import (
 	"go.opentelemetry.io/otel/trace"
 )
 
-func errHandler(ctx context.Context, logger *logging.Logger, msg string, err error) {
+func errHandler(ctx context.Context, logger *slog.Logger, msg string, err error) {
 	logger.ErrorContext(ctx, msg, "err", err)
 	ctxerr.Handle(ctx, err)
 }
@@ -58,7 +57,7 @@ func newVulnerabilitiesSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	config *config.VulnerabilitiesConfig,
 ) (*schedule.Schedule, error) {
 	const name = string(fleet.CronVulnerabilities)
@@ -82,7 +81,7 @@ func newVulnerabilitiesSchedule(
 func cronVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	config *config.VulnerabilitiesConfig,
 ) error {
 	if config == nil {
@@ -117,7 +116,7 @@ func cronVulnerabilities(
 	return nil
 }
 
-func updateVulnHostCounts(ctx context.Context, ds fleet.Datastore, logger *logging.Logger, maxConcurrency int) error {
+func updateVulnHostCounts(ctx context.Context, ds fleet.Datastore, logger *slog.Logger, maxConcurrency int) error {
 	ctx, span := tracer.Start(ctx, "vuln.update_host_counts")
 	defer span.End()
 
@@ -142,7 +141,7 @@ func updateVulnHostCounts(ctx context.Context, ds fleet.Datastore, logger *loggi
 func scanVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	config *config.VulnerabilitiesConfig,
 	appConfig *fleet.AppConfig,
 	vulnPath string,
@@ -249,7 +248,7 @@ func scanVulnerabilities(
 	if err := webhooks.TriggerVulnerabilitiesWebhook(
 		automationCtx,
 		ds,
-		logger.SlogLogger().With("webhook", "vulnerabilities"),
+		logger.With("webhook", "vulnerabilities"),
 		args,
 		mapper,
 	); err != nil {
@@ -261,7 +260,7 @@ func scanVulnerabilities(
 	if err := worker.QueueJiraVulnJobs(
 		automationCtx,
 		ds,
-		logger.SlogLogger().With("jira", "vulnerabilities"),
+		logger.With("jira", "vulnerabilities"),
 		recentV,
 		matchingMeta,
 	); err != nil {
@@ -273,7 +272,7 @@ func scanVulnerabilities(
 	if err := worker.QueueZendeskVulnJobs(
 		automationCtx,
 		ds,
-		logger.SlogLogger().With("zendesk", "vulnerabilities"),
+		logger.With("zendesk", "vulnerabilities"),
 		recentV,
 		matchingMeta,
 	); err != nil {
@@ -292,14 +291,14 @@ func scanVulnerabilities(
 func checkCustomVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	collectVulns bool,
 	startTime time.Time,
 ) []fleet.SoftwareVulnerability {
 	ctx, span := tracer.Start(ctx, "vuln.check_custom")
 	defer span.End()
 
-	vulns, err := customcve.CheckCustomVulnerabilities(ctx, ds, logger.SlogLogger(), startTime)
+	vulns, err := customcve.CheckCustomVulnerabilities(ctx, ds, logger, startTime)
 	if err != nil {
 		errHandler(ctx, logger, "checking custom vulnerabilities", err)
 	}
@@ -316,7 +315,7 @@ func checkCustomVulnerabilities(
 func checkWinVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	vulnPath string,
 	config *config.VulnerabilitiesConfig,
 	collectVulns bool,
@@ -352,7 +351,7 @@ func checkWinVulnerabilities(
 		}
 
 		start := time.Now()
-		r, err := msrc.Analyze(analyzeCtx, ds, o, vulnPath, collectVulns, logger.SlogLogger())
+		r, err := msrc.Analyze(analyzeCtx, ds, o, vulnPath, collectVulns, logger)
 		elapsed := time.Since(start)
 		logger.DebugContext(analyzeCtx, "msrc-analysis-done",
 			"os name", o.Name,
@@ -374,7 +373,7 @@ func checkWinVulnerabilities(
 func checkOvalVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	vulnPath string,
 	config *config.VulnerabilitiesConfig,
 	collectVulns bool,
@@ -433,7 +432,7 @@ func checkOvalVulnerabilities(
 func checkGovalDictionaryVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	vulnPath string,
 	config *config.VulnerabilitiesConfig,
 	collectVulns bool,
@@ -453,7 +452,7 @@ func checkGovalDictionaryVulnerabilities(
 	if !config.DisableDataSync {
 		// Sync on disk goval_dictionary sqlite with current OS Versions.
 		refreshCtx, refreshSpan := tracer.Start(ctx, "vuln.goval_dictionary.refresh")
-		downloaded, err := goval_dictionary.Refresh(refreshCtx, versions, vulnPath, logger.SlogLogger())
+		downloaded, err := goval_dictionary.Refresh(refreshCtx, versions, vulnPath, logger)
 		if err != nil {
 			errHandler(refreshCtx, logger, "updating goval_dictionary databases", err)
 		}
@@ -468,7 +467,7 @@ func checkGovalDictionaryVulnerabilities(
 		trace.WithAttributes(attribute.Int("os_count", len(versions.OSVersions))))
 	for _, version := range versions.OSVersions {
 		start := time.Now()
-		r, err := goval_dictionary.Analyze(analyzeCtx, ds, version, vulnPath, collectVulns, logger.SlogLogger())
+		r, err := goval_dictionary.Analyze(analyzeCtx, ds, version, vulnPath, collectVulns, logger)
 		if err != nil && errors.Is(err, goval_dictionary.ErrUnsupportedPlatform) {
 			logger.DebugContext(analyzeCtx, "goval_dictionary-analysis-unsupported", "platform", version.Name)
 			continue
@@ -491,7 +490,7 @@ func checkGovalDictionaryVulnerabilities(
 func checkNVDVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	vulnPath string,
 	config *config.VulnerabilitiesConfig,
 	collectVulns bool,
@@ -509,7 +508,7 @@ func checkNVDVulnerabilities(
 		CVEFeedPrefixURL:     config.CVEFeedPrefixURL,
 		CISAKnownExploitsURL: config.CISAKnownExploitsURL,
 	}
-	err := nvd.Sync(syncCtx, opts, logger.SlogLogger())
+	err := nvd.Sync(syncCtx, opts, logger)
 	if err != nil {
 		errHandler(syncCtx, logger, "syncing vulnerability database", err)
 		// don't return, continue on ...
@@ -518,14 +517,14 @@ func checkNVDVulnerabilities(
 	}
 
 	loadCtx, loadSpan := tracer.Start(ctx, "vuln.nvd.load_cve_meta")
-	if err := nvd.LoadCVEMeta(loadCtx, logger.SlogLogger(), vulnPath, ds); err != nil {
+	if err := nvd.LoadCVEMeta(loadCtx, logger, vulnPath, ds); err != nil {
 		errHandler(loadCtx, logger, "load cve meta", err)
 		// don't return, continue on ...
 	}
 	loadSpan.End()
 
 	cpeCtx, cpeSpan := tracer.Start(ctx, "vuln.nvd.translate_software_to_cpe")
-	err := nvd.TranslateSoftwareToCPE(cpeCtx, ds, vulnPath, logger.SlogLogger())
+	err := nvd.TranslateSoftwareToCPE(cpeCtx, ds, vulnPath, logger)
 	if err != nil {
 		errHandler(cpeCtx, logger, "analyzing vulnerable software: Software->CPE", err)
 		cpeSpan.End()
@@ -534,7 +533,7 @@ func checkNVDVulnerabilities(
 	cpeSpan.End()
 
 	cveCtx, cveSpan := tracer.Start(ctx, "vuln.nvd.translate_cpe_to_cve")
-	vulns, err := nvd.TranslateCPEToCVE(cveCtx, ds, vulnPath, logger.SlogLogger(), collectVulns, startTime)
+	vulns, err := nvd.TranslateCPEToCVE(cveCtx, ds, vulnPath, logger, collectVulns, startTime)
 	if err != nil {
 		errHandler(cveCtx, logger, "analyzing vulnerable software: CPE->CVE", err)
 		cveSpan.End()
@@ -548,7 +547,7 @@ func checkNVDVulnerabilities(
 func checkMacOfficeVulnerabilities(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	vulnPath string,
 	config *config.VulnerabilitiesConfig,
 	collectVulns bool,
@@ -588,7 +587,7 @@ func newAutomationsSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	intervalReload time.Duration,
 	failingPoliciesSet fleet.FailingPolicySet,
 ) (*schedule.Schedule, error) {
@@ -616,7 +615,7 @@ func newAutomationsSchedule(
 			"host_status_webhook",
 			func(ctx context.Context) error {
 				return webhooks.TriggerHostStatusWebhook(
-					ctx, ds, logger.SlogLogger().With("automation", "host_status"),
+					ctx, ds, logger.With("automation", "host_status"),
 				)
 			},
 		),
@@ -640,7 +639,7 @@ func newAutomationsSchedule(
 func scheduleFailingPoliciesAutomation(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	failingPoliciesSet fleet.FailingPolicySet,
 ) error {
 	for {
@@ -664,7 +663,7 @@ func scheduleFailingPoliciesAutomation(
 func triggerFailingPoliciesAutomation(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	failingPoliciesSet fleet.FailingPolicySet,
 ) error {
 	appConfig, err := ds.AppConfig(ctx)
@@ -676,19 +675,18 @@ func triggerFailingPoliciesAutomation(
 		return fmt.Errorf("parsing appConfig.ServerSettings.ServerURL: %w", err)
 	}
 
-	slogLogger := logger.SlogLogger()
-	err = policies.TriggerFailingPoliciesAutomation(ctx, ds, slogLogger, failingPoliciesSet, func(policy *fleet.Policy, cfg policies.FailingPolicyAutomationConfig) error {
+	err = policies.TriggerFailingPoliciesAutomation(ctx, ds, logger, failingPoliciesSet, func(policy *fleet.Policy, cfg policies.FailingPolicyAutomationConfig) error {
 		switch cfg.AutomationType {
 		case policies.FailingPolicyWebhook:
 			return webhooks.SendFailingPoliciesBatchedPOSTs(
-				ctx, policy, failingPoliciesSet, cfg.HostBatchSize, serverURL, cfg.WebhookURL, time.Now(), slogLogger)
+				ctx, policy, failingPoliciesSet, cfg.HostBatchSize, serverURL, cfg.WebhookURL, time.Now(), logger)
 
 		case policies.FailingPolicyJira:
 			hosts, err := failingPoliciesSet.ListHosts(policy.ID)
 			if err != nil {
 				return ctxerr.Wrapf(ctx, err, "listing hosts for failing policies set %d", policy.ID)
 			}
-			if err := worker.QueueJiraFailingPolicyJob(ctx, ds, logger.SlogLogger(), policy, hosts); err != nil {
+			if err := worker.QueueJiraFailingPolicyJob(ctx, ds, logger, policy, hosts); err != nil {
 				return err
 			}
 			if err := failingPoliciesSet.RemoveHosts(policy.ID, hosts); err != nil {
@@ -700,7 +698,7 @@ func triggerFailingPoliciesAutomation(
 			if err != nil {
 				return ctxerr.Wrapf(ctx, err, "listing hosts for failing policies set %d", policy.ID)
 			}
-			if err := worker.QueueZendeskFailingPolicyJob(ctx, ds, logger.SlogLogger(), policy, hosts); err != nil {
+			if err := worker.QueueZendeskFailingPolicyJob(ctx, ds, logger, policy, hosts); err != nil {
 				return err
 			}
 			if err := failingPoliciesSet.RemoveHosts(policy.ID, hosts); err != nil {
@@ -720,7 +718,7 @@ func newWorkerIntegrationsSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	depStorage *mysql.NanoDEPStorage,
 	commander *apple_mdm.MDMAppleCommander,
 	bootstrapPackageStore fleet.MDMBootstrapPackageStore,
@@ -743,17 +741,17 @@ func newWorkerIntegrationsSchedule(
 	// create the worker and register the Jira and Zendesk jobs even if no
 	// integration is enabled, as that config can change live (and if it's not
 	// there won't be any records to process so it will mostly just sleep).
-	w := worker.NewWorker(ds, logger.SlogLogger())
+	w := worker.NewWorker(ds, logger)
 	// leave the url empty for now, will be filled when the lock is acquired with
 	// the up-to-date config.
 	jira := &worker.Jira{
 		Datastore:     ds,
-		Log:           logger.SlogLogger(),
+		Log:           logger,
 		NewClientFunc: newJiraClient,
 	}
 	zendesk := &worker.Zendesk{
 		Datastore:     ds,
-		Log:           logger.SlogLogger(),
+		Log:           logger,
 		NewClientFunc: newZendeskClient,
 	}
 	var (
@@ -764,34 +762,34 @@ func newWorkerIntegrationsSchedule(
 	// we leave depSvc and deCli nil and macos setup assistants jobs will be
 	// no-ops.
 	if depStorage != nil {
-		depSvc = apple_mdm.NewDEPService(ds, depStorage, logger.SlogLogger())
-		depCli = apple_mdm.NewDEPClient(depStorage, ds, logger.SlogLogger())
+		depSvc = apple_mdm.NewDEPService(ds, depStorage, logger)
+		depCli = apple_mdm.NewDEPClient(depStorage, ds, logger)
 	}
 	macosSetupAsst := &worker.MacosSetupAssistant{
 		Datastore:  ds,
-		Log:        logger.SlogLogger(),
+		Log:        logger,
 		DEPService: depSvc,
 		DEPClient:  depCli,
 	}
 	appleMDM := &worker.AppleMDM{
 		Datastore:             ds,
-		Log:                   logger.SlogLogger(),
+		Log:                   logger,
 		Commander:             commander,
 		BootstrapPackageStore: bootstrapPackageStore,
 		VPPInstaller:          vppInstaller,
 	}
 	vppVerify := &worker.AppleSoftware{
 		Datastore: ds,
-		Log:       logger.SlogLogger(),
+		Log:       logger,
 		Commander: commander,
 	}
 	dbMigrate := &worker.DBMigration{
 		Datastore: ds,
-		Log:       logger.SlogLogger(),
+		Log:       logger,
 	}
 	softwareWorker := &worker.SoftwareWorker{
 		Datastore:     ds,
-		Log:           logger.SlogLogger(),
+		Log:           logger,
 		AndroidModule: androidModule,
 	}
 	w.Register(jira, zendesk, macosSetupAsst, appleMDM, dbMigrate, vppVerify, softwareWorker)
@@ -835,7 +833,7 @@ func newWorkerIntegrationsSchedule(
 			return nil
 		}),
 		schedule.WithJob("dep_cooldowns", func(ctx context.Context) error {
-			return worker.ProcessDEPCooldowns(ctx, ds, logger.SlogLogger())
+			return worker.ProcessDEPCooldowns(ctx, ds, logger)
 		}),
 	)
 
@@ -900,7 +898,7 @@ func newCleanupsAndAggregationSchedule(
 	instanceID string,
 	ds fleet.Datastore,
 	svc fleet.Service,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	enrollHostLimiter fleet.EnrollHostLimiter,
 	config *config.FleetConfig,
 	commander *apple_mdm.MDMAppleCommander,
@@ -1044,14 +1042,14 @@ func newCleanupsAndAggregationSchedule(
 		schedule.WithJob(
 			"renew_scep_certificates",
 			func(ctx context.Context) error {
-				return service.RenewSCEPCertificates(ctx, logger.SlogLogger(), ds, config, commander)
+				return service.RenewSCEPCertificates(ctx, logger, ds, config, commander)
 			},
 		),
 		schedule.WithJob("renew_host_mdm_managed_certificates", func(ctx context.Context) error {
 			return ds.RenewMDMManagedCertificates(ctx)
 		}),
 		schedule.WithJob("renew_android_certificate_templates", func(ctx context.Context) error {
-			return android_svc.RenewCertificateTemplates(ctx, ds, logger.SlogLogger())
+			return android_svc.RenewCertificateTemplates(ctx, ds, logger)
 		}),
 		schedule.WithJob("query_results_cleanup", func(ctx context.Context) error {
 			config, err := ds.AppConfig(ctx)
@@ -1156,7 +1154,7 @@ func newFrequentCleanupsSchedule(
 	instanceID string,
 	ds fleet.Datastore,
 	lq fleet.LiveQueryStore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronFrequentCleanups)
@@ -1195,7 +1193,7 @@ func newQueryResultsCleanupSchedule(
 	instanceID string,
 	ds fleet.Datastore,
 	liveQueryStore fleet.LiveQueryStore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronQueryResultsCleanup)
@@ -1229,29 +1227,29 @@ func newQueryResultsCleanupSchedule(
 
 func verifyDiskEncryptionKeys(
 	ctx context.Context,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	ds fleet.Datastore,
 ) error {
 	appCfg, err := ds.AppConfig(ctx)
 	if err != nil {
-		logger.Log("err", "unable to get app config", "details", err)
+		logger.ErrorContext(ctx, "unable to get app config", "details", err)
 		return ctxerr.Wrap(ctx, err, "fetching app config")
 	}
 
 	if !appCfg.MDM.EnabledAndConfigured {
-		logger.Log("inf", "skipping verification of macOS encryption keys as MDM is not fully configured")
+		logger.InfoContext(ctx, "skipping verification of macOS encryption keys as MDM is not fully configured")
 		return nil
 	}
 
 	keys, err := ds.GetUnverifiedDiskEncryptionKeys(ctx)
 	if err != nil {
-		logger.Log("err", "unable to get unverified disk encryption keys from the database", "details", err)
+		logger.ErrorContext(ctx, "unable to get unverified disk encryption keys from the database", "details", err)
 		return err
 	}
 
 	cert, err := assets.CAKeyPair(ctx, ds)
 	if err != nil {
-		logger.Log("err", "unable to get CA keypair", "details", err)
+		logger.ErrorContext(ctx, "unable to get CA keypair", "details", err)
 		return ctxerr.Wrap(ctx, err, "parsing SCEP keypair")
 	}
 
@@ -1270,23 +1268,22 @@ func verifyDiskEncryptionKeys(
 	}
 
 	if err := ds.SetHostsDiskEncryptionKeyStatus(ctx, decryptable, true, latest); err != nil {
-		logger.Log("err", "unable to update decryptable status", "details", err)
+		logger.ErrorContext(ctx, "unable to update decryptable status", "details", err)
 		return err
 	}
 	if err := ds.SetHostsDiskEncryptionKeyStatus(ctx, undecryptable, false, latest); err != nil {
-		logger.Log("err", "unable to update decryptable status", "details", err)
+		logger.ErrorContext(ctx, "unable to update decryptable status", "details", err)
 		return err
 	}
 
 	return nil
 }
 
-func newUsageStatisticsSchedule(ctx context.Context, instanceID string, ds fleet.Datastore, config config.FleetConfig, logger *logging.Logger) (*schedule.Schedule, error) {
+func newUsageStatisticsSchedule(ctx context.Context, instanceID string, ds fleet.Datastore, config config.FleetConfig, logger *slog.Logger) (*schedule.Schedule, error) {
 	const (
 		name            = string(fleet.CronUsageStatistics)
 		defaultInterval = 1 * time.Hour
 	)
-	slogLogger := logger.SlogLogger()
 	s := schedule.New(
 		ctx, name, instanceID, defaultInterval, ds, ds,
 		schedule.WithLogger(logger.With("cron", name)),
@@ -1295,7 +1292,7 @@ func newUsageStatisticsSchedule(ctx context.Context, instanceID string, ds fleet
 			func(ctx context.Context) error {
 				// NOTE(mna): this is not a route from the fleet server (not in server/service/handler.go) so it
 				// will not automatically support the /latest/ versioning. Leaving it as /v1/ for that reason.
-				return trySendStatistics(ctx, ds, fleet.StatisticsFrequency, "https://fleetdm.com/api/v1/webhooks/receive-usage-analytics", config, slogLogger)
+				return trySendStatistics(ctx, ds, fleet.StatisticsFrequency, "https://fleetdm.com/api/v1/webhooks/receive-usage-analytics", config, logger)
			},
 		),
 	)
@@ -1342,7 +1339,7 @@ func newAppleMDMDEPProfileAssigner(
 	periodicity time.Duration,
 	ds fleet.Datastore,
 	depStorage *mysql.NanoDEPStorage,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const name = string(fleet.CronAppleMDMDEPProfileAssigner)
 	logger = logger.With("cron", name, "component", "nanodep-syncer")
@@ -1358,7 +1355,7 @@ func newAppleMDMDEPProfileAssigner(
 func appleMDMDEPSyncerJob(
 	ds fleet.Datastore,
 	depStorage *mysql.NanoDEPStorage,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) func(context.Context) error {
 	var fleetSyncer *apple_mdm.DEPService
 	return func(ctx context.Context) error {
@@ -1381,18 +1378,18 @@ func appleMDMDEPSyncerJob(
 			return ctxerr.Wrap(ctx, err, "retrieving migrated ABM token")
 		}
 		if incompleteToken != nil {
-			logger.Log("msg", "migrated ABM token found, updating its metadata")
-			if err := apple_mdm.SetABMTokenMetadata(ctx, incompleteToken, depStorage, ds, logger.SlogLogger(), false); err != nil {
+			logger.InfoContext(ctx, "migrated ABM token found, updating its metadata")
+			if err := apple_mdm.SetABMTokenMetadata(ctx, incompleteToken, depStorage, ds, logger, false); err != nil {
 				return ctxerr.Wrap(ctx, err, "updating migrated ABM token metadata")
 			}
 			if err := ds.SaveABMToken(ctx, incompleteToken); err != nil {
 				return ctxerr.Wrap(ctx, err, "saving updated migrated ABM token")
 			}
-			logger.Log("msg", "completed migration of existing ABM token")
+			logger.InfoContext(ctx, "completed migration of existing ABM token")
 		}
 
 		if fleetSyncer == nil {
-			fleetSyncer = apple_mdm.NewDEPService(ds, depStorage, logger.SlogLogger())
+			fleetSyncer = apple_mdm.NewDEPService(ds, depStorage, logger)
 		}
 
 		return fleetSyncer.RunAssigner(ctx)
@@ -1404,7 +1401,7 @@ func newAppleMDMProfileManagerSchedule(
 	instanceID string,
 	ds fleet.Datastore,
 	commander *apple_mdm.MDMAppleCommander,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronMDMAppleProfileManager)
@@ -1419,10 +1416,10 @@ func newAppleMDMProfileManagerSchedule(
 		ctx, name, instanceID, defaultInterval, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("manage_apple_profiles", func(ctx context.Context) error {
-			return service.ReconcileAppleProfiles(ctx, ds, commander, logger.SlogLogger())
+			return service.ReconcileAppleProfiles(ctx, ds, commander, logger)
 		}),
 		schedule.WithJob("manage_apple_declarations", func(ctx context.Context) error {
-			return service.ReconcileAppleDeclarations(ctx, ds, commander, logger.SlogLogger())
+			return service.ReconcileAppleDeclarations(ctx, ds, commander, logger)
 		}),
 	)
 
@@ -1433,7 +1430,7 @@ func newWindowsMDMProfileManagerSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronMDMWindowsProfileManager)
@@ -1448,7 +1445,7 @@ func newWindowsMDMProfileManagerSchedule(
 		ctx, name, instanceID, defaultInterval, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("manage_windows_profiles", func(ctx context.Context) error {
-			return service.ReconcileWindowsProfiles(ctx, ds, logger.SlogLogger())
+			return service.ReconcileWindowsProfiles(ctx, ds, logger)
 		}),
 	)
 
@@ -1459,7 +1456,7 @@ func newAndroidMDMProfileManagerSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	licenseKey string,
 	androidAgentConfig config.AndroidAgentConfig,
 ) (*schedule.Schedule, error) {
@@ -1473,7 +1470,7 @@ func newAndroidMDMProfileManagerSchedule(
 		ctx, name, instanceID, defaultInterval, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("manage_android_profiles", func(ctx context.Context) error {
-			return android_svc.ReconcileProfiles(ctx, ds, logger.SlogLogger(), licenseKey, androidAgentConfig)
+			return android_svc.ReconcileProfiles(ctx, ds, logger, licenseKey, androidAgentConfig)
 		}),
 	)
 
@@ -1485,7 +1482,7 @@ func newMDMAppleServiceDiscoverySchedule(
 	instanceID string,
 	ds fleet.Datastore,
 	depStorage *mysql.NanoDEPStorage,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	urlPrefix string,
 ) (*schedule.Schedule, error) {
 	const name = "mdm_service_discovery"
@@ -1495,7 +1492,7 @@ func newMDMAppleServiceDiscoverySchedule(
 		ctx, name, instanceID, interval, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("mdm_apple_account_driven_enrollment_profile", func(ctx context.Context) error {
-			return service.EnsureMDMAppleServiceDiscovery(ctx, ds, depStorage, logger.SlogLogger(), urlPrefix)
+			return service.EnsureMDMAppleServiceDiscovery(ctx, ds, depStorage, logger, urlPrefix)
 		}),
 	)
 	return s, nil
@@ -1506,7 +1503,7 @@ func newMDMAPNsPusher(
 	instanceID string,
 	ds fleet.Datastore,
 	commander *apple_mdm.MDMAppleCommander,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const name = string(fleet.CronAppleMDMAPNsPusher)
 
@@ -1534,16 +1531,16 @@ func newMDMAPNsPusher(
 				return nil
 			}
 
-			return service.SendPushesToPendingDevices(ctx, ds, commander, logger.SlogLogger())
+			return service.SendPushesToPendingDevices(ctx, ds, commander, logger)
 		}),
 	)
 
 	return s, nil
 }
 
-func cleanupCronStatsOnShutdown(ctx context.Context, ds fleet.Datastore, logger *logging.Logger, instanceID string) {
+func cleanupCronStatsOnShutdown(ctx context.Context, ds fleet.Datastore, logger *slog.Logger, instanceID string) {
 	if err := ds.UpdateAllCronStatsForInstance(ctx, instanceID, fleet.CronStatsStatusPending, fleet.CronStatsStatusCanceled); err != nil {
-		logger.Log("err", "cancel pending cron stats for instance", "details", err)
+		logger.ErrorContext(ctx, "cancel pending cron stats for instance", "details", err)
 	}
 }
 
@@ -1552,7 +1549,7 @@ func newActivitiesStreamingSchedule(
 	instanceID string,
 	activitySvc activity_api.Service,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	auditLogger activity_api.JSONLogger,
 ) (*schedule.Schedule, error) {
 	const (
@@ -1579,7 +1576,7 @@ func newHostVitalsLabelMembershipSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronHostVitalsLabelMembership)
@@ -1630,7 +1627,7 @@ func newBatchActivityCompletionCheckerSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronBatchActivityCompletionChecker)
@@ -1661,7 +1658,7 @@ func cronBatchActivityCompletionChecker(
 	return nil
 }
 
-func stringSliceToUintSlice(ctx context.Context, s []string, logger *logging.Logger) []uint {
+func stringSliceToUintSlice(ctx context.Context, s []string, logger *slog.Logger) []uint {
 	result := make([]uint, 0, len(s))
 	for _, v := range s {
 		i, err := strconv.ParseUint(v, 10, 64)
@@ -1687,7 +1684,7 @@ func newIPhoneIPadRefetcher(
 	periodicity time.Duration,
 	ds fleet.Datastore,
 	commander *apple_mdm.MDMAppleCommander,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	newActivityFn apple_mdm.NewActivityFunc,
 ) (*schedule.Schedule, error) {
 	const name = string(fleet.CronAppleMDMIPhoneIPadRefetcher)
@@ -1696,7 +1693,7 @@ func newIPhoneIPadRefetcher(
 		ctx, name, instanceID, periodicity, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("cron_iphone_ipad_refetcher", func(ctx context.Context) error {
-			return apple_mdm.IOSiPadOSRefetch(ctx, ds, commander, logger.SlogLogger(), newActivityFn)
+			return apple_mdm.IOSiPadOSRefetch(ctx, ds, commander, logger, newActivityFn)
 		}),
 	)
 
@@ -1710,7 +1707,7 @@ func cronUninstallSoftwareMigration(
 	instanceID string,
 	ds fleet.Datastore,
 	softwareInstallStore fleet.SoftwareInstallerStore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronUninstallSoftwareMigration)
@@ -1722,7 +1719,7 @@ func cronUninstallSoftwareMigration(
 		schedule.WithLogger(logger),
 		schedule.WithRunOnce(true),
 		schedule.WithJob(name, func(ctx context.Context) error {
-			return eeservice.UninstallSoftwareMigration(ctx, ds, softwareInstallStore, logger.SlogLogger())
+			return eeservice.UninstallSoftwareMigration(ctx, ds, softwareInstallStore, logger)
 		}),
 	)
 	return s, nil
@@ -1735,7 +1732,7 @@ func cronUpgradeCodeSoftwareMigration(
 	instanceID string,
 	ds fleet.Datastore,
 	softwareInstallStore fleet.SoftwareInstallerStore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronUpgradeCodeSoftwareMigration)
@@ -1750,7 +1747,7 @@ func cronUpgradeCodeSoftwareMigration(
 		// ensures it runs a few seconds after Fleet is started
 		schedule.WithDefaultPrevRunCreatedAt(time.Now().Add(priorJobDiff)),
 		schedule.WithJob(name, func(ctx context.Context) error {
-			return eeservice.UpgradeCodeMigration(ctx, ds, softwareInstallStore, logger.SlogLogger())
+			return eeservice.UpgradeCodeMigration(ctx, ds, softwareInstallStore, logger)
 		}),
 	)
 	return s, nil
@@ -1760,7 +1757,7 @@ func newMaintainedAppSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronMaintainedApps)
@@ -1775,7 +1772,7 @@ func newMaintainedAppSchedule(
 		// ensures it runs a few seconds after Fleet is started
 		schedule.WithDefaultPrevRunCreatedAt(time.Now().Add(priorJobDiff)),
 		schedule.WithJob("refresh_maintained_apps", func(ctx context.Context) error {
-			return maintained_apps.Refresh(ctx, ds, logger.SlogLogger())
+			return maintained_apps.Refresh(ctx, ds, logger)
 		}),
 	)
 
@@ -1786,7 +1783,7 @@ func newRefreshVPPAppVersionsSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	vppAppsConfig apple_apps.Config,
 ) (*schedule.Schedule, error) {
 	const (
@@ -1815,7 +1812,7 @@ func newIPhoneIPadReviver(
 	instanceID string,
 	ds fleet.Datastore,
 	commander *apple_mdm.MDMAppleCommander,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const name = string(fleet.CronAppleMDMIPhoneIPadReviver)
 	logger = logger.With("cron", name, "component", "iphone-ipad-reviver")
@@ -1823,7 +1820,7 @@ func newIPhoneIPadReviver(
 		ctx, name, instanceID, 1*time.Hour, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("cron_iphone_ipad_reviver", func(ctx context.Context) error {
-			return apple_mdm.IOSiPadOSRevive(ctx, ds, commander, logger.SlogLogger())
+			return apple_mdm.IOSiPadOSRevive(ctx, ds, commander, logger)
 		}),
 	)
 
@@ -1834,7 +1831,7 @@ func newUpcomingActivitiesSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronUpcomingActivitiesMaintenance)
@@ -1857,7 +1854,7 @@ func newBatchActivitiesSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronScheduledBatchActivities)
@@ -1866,11 +1863,11 @@ func newBatchActivitiesSchedule(
 
 	logger = logger.With("cron", name)
 
-	w := worker.NewWorker(ds, logger.SlogLogger())
+	w := worker.NewWorker(ds, logger)
 
 	scriptsJob := &worker.BatchScripts{
 		Datastore: ds,
-		Log:       logger.SlogLogger(),
+		Log:       logger,
 	}
 
 	w.Register(scriptsJob)
@@ -1896,7 +1893,7 @@ func newAndroidMDMDeviceReconcilerSchedule(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	licenseKey string,
 	newActivityFn fleet.NewActivityFunc,
 ) (*schedule.Schedule, error) {
@@ -1910,7 +1907,7 @@ func newAndroidMDMDeviceReconcilerSchedule(
 		ctx, name, instanceID, defaultInterval, ds, ds,
 		schedule.WithLogger(logger),
 		schedule.WithJob("reconcile_android_devices", func(ctx context.Context) error {
-			return android_svc.ReconcileAndroidDevices(ctx, ds, logger.SlogLogger(), licenseKey, newActivityFn)
+			return android_svc.ReconcileAndroidDevices(ctx, ds, logger, licenseKey, newActivityFn)
 		}),
 	)

@@ -1921,7 +1918,7 @@ func cronEnableAndroidAppReportsOnDefaultPolicy(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	androidSvc android.Service,
 ) (*schedule.Schedule, error) {
 	const (
@@ -1947,7 +1944,7 @@ func cronMigrateToPerHostPolicy(
 	ctx context.Context,
 	instanceID string,
 	ds fleet.Datastore,
-	logger *logging.Logger,
+	logger *slog.Logger,
 	androidSvc android.Service,
 ) (*schedule.Schedule, error) {
 	const (
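Note: the constructors above all scope a single base logger with `With` before handing it to a schedule or worker (`logger.With("cron", name)`, `logger.With("webhook", "vulnerabilities")`, and so on). A small runnable sketch of that pattern, with an illustrative handler:

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	base := slog.New(slog.NewJSONHandler(os.Stderr, nil))

	// Child loggers share the parent's handler; the added attributes appear
	// on every record they emit, so each cron's output stays identifiable.
	vulnLogger := base.With("cron", "vulnerabilities")
	webhookLogger := vulnLogger.With("webhook", "vulnerabilities")

	vulnLogger.InfoContext(ctx, "schedule started")
	webhookLogger.ErrorContext(ctx, "posting webhook", "err", context.DeadlineExceeded)
}
```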
@@ -5,6 +5,7 @@ import (
 	"database/sql"
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"net/http/httptest"
 	"testing"
@@ -18,7 +19,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
 	"github.com/fleetdm/fleet/v4/server/mock"
 	mdmmock "github.com/fleetdm/fleet/v4/server/mock/mdm"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"github.com/fleetdm/fleet/v4/server/test"
 )
 
@@ -27,7 +27,7 @@ func TestNewAppleMDMProfileManagerWithoutConfig(t *testing.T) {
 	mdmStorage := &mdmmock.MDMAppleStore{}
 	ds := new(mock.Store)
 	cmdr := apple_mdm.NewMDMAppleCommander(mdmStorage, nil)
-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)
 
 	sch, err := newAppleMDMProfileManagerSchedule(ctx, "foo", ds, cmdr, logger)
 	require.NotNil(t, sch)
@@ -37,7 +37,7 @@ func TestNewAppleMDMProfileManagerWithoutConfig(t *testing.T) {
 func TestNewWindowsMDMProfileManagerWithoutConfig(t *testing.T) {
 	ctx := context.Background()
 	ds := new(mock.Store)
-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)
 
 	sch, err := newWindowsMDMProfileManagerSchedule(ctx, "foo", ds, logger)
 	require.NotNil(t, sch)
@@ -88,7 +88,7 @@ func TestMigrateABMTokenDuringDEPCronJob(t *testing.T) {
 	err = depStorage.StoreConfig(ctx, apple_mdm.UnsavedABMTokenOrgName, &nanodep_client.Config{BaseURL: srv.URL})
 	require.NoError(t, err)
 
-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)
 	syncFn := appleMDMDEPSyncerJob(ds, depStorage, logger)
 	err = syncFn(ctx)
 	require.NoError(t, err)
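Note: the tests replace `logging.NewNopLogger()` with a discard logger built from the standard library. `slog.DiscardHandler` was added in Go 1.24; a sketch of a reusable helper in that style (the helper itself is not part of this PR):

```go
package cron_test

import (
	"log/slog"
	"testing"
)

// newTestLogger returns a logger whose records are dropped, matching the
// pattern used by the updated tests above. On toolchains older than Go 1.24,
// slog.New(slog.NewTextHandler(io.Discard, nil)) is an equivalent substitute.
func newTestLogger(t *testing.T) *slog.Logger {
	t.Helper()
	return slog.New(slog.DiscardHandler)
}
```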
@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 	"io"
+	"log/slog"
 	"math/rand"
 	"os"
 	"time"
@@ -135,16 +136,14 @@ func applyDevFlags(cfg *config.FleetConfig) {
 	}
 }
 
-// initLogger creates a *Logger backed by slog.
-// Returning the concrete type allows callers to access the underlying
-// slog.Logger via SlogLogger() when needed for migrated packages.
-func initLogger(cfg config.FleetConfig, loggerProvider *otelsdklog.LoggerProvider) *logging.Logger {
-	slogLogger := logging.NewSlogLogger(logging.Options{
+// initLogger creates a *slog.Logger with the appropriate handler based on the
+// Fleet configuration (JSON format, debug level, tracing, OTEL logs).
+func initLogger(cfg config.FleetConfig, loggerProvider *otelsdklog.LoggerProvider) *slog.Logger {
+	return logging.NewSlogLogger(logging.Options{
 		JSON:            cfg.Logging.JSON,
 		Debug:           cfg.Logging.Debug,
 		TracingEnabled:  cfg.Logging.TracingEnabled,
 		OtelLogsEnabled: cfg.Logging.OtelLogsEnabled,
 		LoggerProvider:  loggerProvider,
 	})
-	return logging.NewLogger(slogLogger)
 }
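Note: `initLogger` now returns the `*slog.Logger` built by `logging.NewSlogLogger` directly instead of wrapping it in `logging.NewLogger`. The body of `NewSlogLogger` is not part of this diff; the following is a hypothetical sketch of what an options-based constructor like it might do, mirroring only the `JSON` and `Debug` fields and omitting the tracing and OTEL wiring:

```go
package logging

import (
	"log/slog"
	"os"
)

// Options mirrors two fields passed by initLogger above. The handler wiring
// below is an assumption for illustration, not Fleet's actual implementation.
type Options struct {
	JSON  bool
	Debug bool
}

func NewSlogLogger(opts Options) *slog.Logger {
	level := slog.LevelInfo
	if opts.Debug {
		level = slog.LevelDebug
	}
	var h slog.Handler = slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: level})
	if opts.JSON {
		h = slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: level})
	}
	return slog.New(h)
}
```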
|
|||
|
|
@ -86,7 +86,6 @@ import (
|
|||
"github.com/getsentry/sentry-go"
|
||||
"github.com/go-kit/kit/endpoint"
|
||||
kitprometheus "github.com/go-kit/kit/metrics/prometheus"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/google/uuid"
|
||||
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery"
|
||||
"github.com/ngrok/sqlmw"
|
||||
|
|
@ -252,7 +251,7 @@ the way that the Fleet server works.
|
|||
}
|
||||
|
||||
if dev_mode.IsEnabled {
|
||||
createTestBuckets(&config, logger)
|
||||
createTestBuckets(cmd.Context(), &config, logger)
|
||||
}
|
||||
|
||||
allowedHostIdentifiers := map[string]bool{
|
||||
|
|
@ -315,7 +314,7 @@ the way that the Fleet server works.
|
|||
var ds fleet.Datastore
|
||||
var carveStore fleet.CarveStore
|
||||
|
||||
opts := []mysql.DBOption{mysql.Logger(logger.SlogLogger()), mysql.WithFleetConfig(&config)}
|
||||
opts := []mysql.DBOption{mysql.Logger(logger), mysql.WithFleetConfig(&config)}
|
||||
if config.MysqlReadReplica.Address != "" {
|
||||
opts = append(opts, mysql.Replica(&config.MysqlReadReplica))
|
||||
}
|
||||
|
|
@ -429,7 +428,7 @@ the way that the Fleet server works.
|
|||
if err != nil {
|
||||
initFatal(err, "initialize Redis")
|
||||
}
|
||||
level.Info(logger).Log("component", "redis", "mode", redisPool.Mode())
|
||||
logger.InfoContext(cmd.Context(), "redis initialized", "component", "redis", "mode", redisPool.Mode())
|
||||
|
||||
ds = cached_mysql.New(ds)
|
||||
var dsOpts []mysqlredis.Option
|
||||
|
|
@ -440,9 +439,9 @@ the way that the Fleet server works.
|
|||
ds = redisWrapperDS
|
||||
|
||||
resultStore := pubsub.NewRedisQueryResults(redisPool, config.Redis.DuplicateResults,
|
||||
logger.SlogLogger().With("component", "query-results"),
|
||||
logger.With("component", "query-results"),
|
||||
)
|
||||
liveQueryStore := live_query.NewRedisLiveQuery(redisPool, logger.SlogLogger(), liveQueryMemCacheDuration)
|
||||
liveQueryStore := live_query.NewRedisLiveQuery(redisPool, logger, liveQueryMemCacheDuration)
|
||||
ssoSessionStore := sso.NewSessionStore(redisPool)
|
||||
|
||||
// Set common configuration for all logging.
|
||||
|
|
@ -511,7 +510,7 @@ the way that the Fleet server works.
|
|||
loggingConfig.KafkaREST.Topic = config.KafkaREST.StatusTopic
|
||||
loggingConfig.Nats.Subject = config.Nats.StatusSubject
|
||||
|
||||
osquerydStatusLogger, err := logging.NewJSONLogger(cmd.Context(), "status", loggingConfig, logger.SlogLogger())
|
||||
osquerydStatusLogger, err := logging.NewJSONLogger(cmd.Context(), "status", loggingConfig, logger)
|
||||
if err != nil {
|
||||
initFatal(err, "initializing osqueryd status logging")
|
||||
}
|
||||
|
|
@ -528,7 +527,7 @@ the way that the Fleet server works.
|
|||
loggingConfig.KafkaREST.Topic = config.KafkaREST.ResultTopic
|
||||
loggingConfig.Nats.Subject = config.Nats.ResultSubject
|
||||
|
||||
osquerydResultLogger, err := logging.NewJSONLogger(cmd.Context(), "result", loggingConfig, logger.SlogLogger())
|
||||
osquerydResultLogger, err := logging.NewJSONLogger(cmd.Context(), "result", loggingConfig, logger)
|
||||
if err != nil {
|
||||
initFatal(err, "initializing osqueryd result logging")
|
||||
}
|
||||
|
|
@ -546,7 +545,7 @@ the way that the Fleet server works.
|
|||
loggingConfig.KafkaREST.Topic = config.KafkaREST.AuditTopic
|
||||
loggingConfig.Nats.Subject = config.Nats.AuditSubject
|
||||
|
||||
auditLogger, err = logging.NewJSONLogger(cmd.Context(), "audit", loggingConfig, logger.SlogLogger())
|
||||
auditLogger, err = logging.NewJSONLogger(cmd.Context(), "audit", loggingConfig, logger)
|
||||
if err != nil {
|
||||
initFatal(err, "initializing audit logging")
|
||||
}
|
||||
|
|
@ -565,7 +564,7 @@ the way that the Fleet server works.
|
|||
if err != nil {
|
||||
initFatal(err, "initializing sentry")
|
||||
}
|
||||
level.Info(logger).Log("msg", "sentry initialized", "dsn", config.Sentry.Dsn)
|
||||
logger.InfoContext(cmd.Context(), "sentry initialized", "dsn", config.Sentry.Dsn)
|
||||
|
||||
defer sentry.Recover()
|
||||
defer sentry.Flush(2 * time.Second)
|
||||
|
|
@ -574,9 +573,9 @@ the way that the Fleet server works.
|
|||
var geoIP fleet.GeoIP
|
||||
geoIP = &fleet.NoOpGeoIP{}
|
||||
if config.GeoIP.DatabasePath != "" {
|
||||
maxmind, err := fleet.NewMaxMindGeoIP(logger.SlogLogger(), config.GeoIP.DatabasePath)
|
||||
maxmind, err := fleet.NewMaxMindGeoIP(logger, config.GeoIP.DatabasePath)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "failed to initialize maxmind geoip, check database path", "database_path",
|
||||
logger.ErrorContext(cmd.Context(), "failed to initialize maxmind geoip, check database path", "database_path",
|
||||
config.GeoIP.DatabasePath, "error", err)
|
||||
} else {
|
||||
geoIP = maxmind
|
||||
|
|
@ -585,7 +584,7 @@ the way that the Fleet server works.
|
|||
|
||||
if config.MDM.EnableCustomOSUpdatesAndFileVault && !license.IsPremium() {
|
||||
config.MDM.EnableCustomOSUpdatesAndFileVault = false
|
||||
level.Warn(logger).Log("msg", "Disabling custom OS updates and FileVault management because Fleet Premium license is not present")
|
||||
logger.WarnContext(cmd.Context(), "Disabling custom OS updates and FileVault management because Fleet Premium license is not present")
|
||||
}
|
||||
|
||||
mdmStorage, err := mds.NewMDMAppleMDMStorage()
|
||||
|
|
@ -604,7 +603,7 @@ the way that the Fleet server works.
|
|||
}
|
||||
|
||||
var mdmPushService push.Pusher
|
||||
nanoMDMLogger := service.NewNanoMDMLogger(logger.With("component", "apple-mdm-push").SlogLogger())
|
||||
nanoMDMLogger := service.NewNanoMDMLogger(logger.With("component", "apple-mdm-push"))
|
||||
pushProviderFactory := buford.NewPushProviderFactory(buford.WithNewClient(func(cert *tls.Certificate) (*http.Client, error) {
|
||||
return fleethttp.NewClient(fleethttp.WithTLSClientConfig(&tls.Config{
|
||||
Certificates: []tls.Certificate{*cert},
|
||||
|
|
@ -648,7 +647,7 @@ the way that the Fleet server works.
|
|||
toInsert[fleet.MDMAssetAPNSCert] = struct{}{}
|
||||
toInsert[fleet.MDMAssetAPNSKey] = struct{}{}
|
||||
default:
|
||||
level.Warn(logger).Log("msg",
|
||||
logger.WarnContext(cmd.Context(),
|
||||
"Your server already has stored APNs certificates. Fleet will ignore any certificates provided via environment variables when this happens.")
|
||||
}
|
||||
|
||||
|
|
@ -661,7 +660,7 @@ the way that the Fleet server works.
|
|||
toInsert[fleet.MDMAssetCACert] = struct{}{}
|
||||
toInsert[fleet.MDMAssetCAKey] = struct{}{}
|
||||
default:
|
||||
level.Warn(logger).Log("msg",
|
||||
logger.WarnContext(cmd.Context(),
|
||||
"Your server already has stored SCEP certificates. Fleet will ignore any certificates provided via environment variables when this happens.")
|
||||
}
|
||||
|
||||
|
|
@ -701,7 +700,7 @@ the way that the Fleet server works.
|
|||
if err := ds.InsertMDMConfigAssets(context.Background(), args, nil); err != nil {
|
||||
if mysql.IsDuplicate(err) {
|
||||
// we already checked for existing assets so we should never have a duplicate key error here; we'll add a debug log just in case
|
||||
level.Debug(logger).Log("msg", "unexpected duplicate key error inserting MDM APNs and SCEP assets")
|
||||
logger.DebugContext(cmd.Context(), "unexpected duplicate key error inserting MDM APNs and SCEP assets")
|
||||
} else {
|
||||
initFatal(err, "inserting MDM APNs and SCEP assets")
|
||||
}
|
||||
|
|
@ -731,7 +730,7 @@ the way that the Fleet server works.
|
|||
toInsert = append(toInsert, fleet.MDMConfigAsset{Name: fleet.MDMAssetABMKey, Value: appleBM.KeyPEM},
|
||||
fleet.MDMConfigAsset{Name: fleet.MDMAssetABMCert, Value: appleBM.CertPEM})
|
||||
default:
|
||||
level.Warn(logger).Log("msg",
|
||||
logger.WarnContext(cmd.Context(),
|
||||
"Your server already has stored ABM certificates and token. Fleet will ignore any certificates provided via environment variables when this happens.")
|
||||
}
|
||||
|
||||
|
|
@ -740,7 +739,7 @@ the way that the Fleet server works.
|
|||
switch {
|
||||
case err != nil && mysql.IsDuplicate(err):
|
||||
// we already checked for existing assets so we should never have a duplicate key error here; we'll add a debug log just in case
|
||||
level.Debug(logger).Log("msg", "unexpected duplicate key error inserting ABM assets")
|
||||
logger.DebugContext(cmd.Context(), "unexpected duplicate key error inserting ABM assets")
|
||||
case err != nil:
|
||||
initFatal(err, "inserting ABM assets")
|
||||
default:
|
||||
|
|
@ -794,10 +793,10 @@ the way that the Fleet server works.
|
|||
}
|
||||
}
|
||||
if appCfg.MDM.EnabledAndConfigured {
|
||||
level.Info(logger).Log("msg", "Apple MDM enabled")
|
||||
logger.InfoContext(cmd.Context(), "Apple MDM enabled")
|
||||
}
|
||||
if appCfg.MDM.AppleBMEnabledAndConfigured {
|
||||
level.Info(logger).Log("msg", "Apple Business Manager enabled")
|
||||
logger.InfoContext(cmd.Context(), "Apple Business Manager enabled")
|
||||
}
|
||||
|
||||
// register the Microsoft MDM services
|
||||
|
|
@ -827,12 +826,12 @@ the way that the Fleet server works.
|
|||
// if SMTP is already enabled then default the backend to empty string, which fill force load the SMTP implementation
|
||||
if config.Email.EmailBackend != "" {
|
||||
config.Email.EmailBackend = ""
|
||||
level.Warn(logger).Log("msg", "SMTP is already enabled, first disable SMTP to utilize a different email backend")
|
||||
logger.WarnContext(cmd.Context(), "SMTP is already enabled, first disable SMTP to utilize a different email backend")
|
||||
}
|
||||
}
|
||||
mailService, err := mail.NewService(config)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("err", err, "msg", "failed to configure mailing service")
|
||||
logger.ErrorContext(cmd.Context(), "failed to configure mailing service", "err", err)
|
||||
}
|
||||
|
||||
cronSchedules := fleet.NewCronSchedules()
|
||||
|
|
@ -843,8 +842,8 @@ the way that the Fleet server works.
|
|||
|
||||
// Channel used to trigger graceful shutdown on fatal DB errors (e.g. Aurora failover).
|
||||
dbFatalCh := make(chan error, 1)
|
||||
common_mysql.SetFatalErrorHandler(func(err error) {
|
||||
level.Error(logger).Log("msg", "fatal database error detected, initiating graceful shutdown", "err", err)
|
||||
common_mysql.SetFatalErrorHandler(func(ctx context.Context, err error) {
|
||||
logger.ErrorContext(ctx, "fatal database error detected, initiating graceful shutdown", "err", err)
|
||||
select {
|
||||
case dbFatalCh <- err:
|
||||
default:
|
||||
|
|
@ -870,16 +869,16 @@ the way that the Fleet server works.
|
|||
}
|
||||
}
|
||||
|
||||
eh := errorstore.NewHandler(ctx, redisPool, logger.SlogLogger(), config.Logging.ErrorRetentionPeriod)
|
||||
scepConfigMgr := eeservice.NewSCEPConfigService(logger.SlogLogger(), nil)
|
||||
digiCertService := digicert.NewService(digicert.WithLogger(logger.SlogLogger()))
|
||||
eh := errorstore.NewHandler(ctx, redisPool, logger, config.Logging.ErrorRetentionPeriod)
|
||||
scepConfigMgr := eeservice.NewSCEPConfigService(logger, nil)
|
||||
digiCertService := digicert.NewService(digicert.WithLogger(logger))
|
||||
ctx = ctxerr.NewContext(ctx, eh)
|
||||
|
||||
activitiesModule := activities.NewActivityModule()
|
||||
config.MDM.AndroidAgent.Validate(initFatal)
|
||||
androidSvc, err := android_service.NewService(
|
||||
ctx,
|
||||
logger.SlogLogger(),
|
||||
logger,
|
||||
ds,
|
||||
config.License.Key,
|
||||
config.Server.PrivateKey,
|
||||
|
|
@ -896,7 +895,7 @@ the way that the Fleet server works.
|
|||
ds,
|
||||
task,
|
||||
resultStore,
|
||||
logger.SlogLogger(),
|
||||
logger,
|
||||
&service.OsqueryLogger{
|
||||
Status: osquerydStatusLogger,
|
||||
Result: osquerydResultLogger,
|
||||
|
|
@ -930,11 +929,11 @@ the way that the Fleet server works.
|
|||
var softwareTitleIconStore fleet.SoftwareTitleIconStore
|
||||
var distributedLock fleet.Lock
|
||||
if license.IsPremium() {
|
||||
hydrantService := est.NewService(est.WithLogger(logger.SlogLogger()))
|
||||
hydrantService := est.NewService(est.WithLogger(logger))
|
||||
profileMatcher := apple_mdm.NewProfileMatcher(redisPool)
|
||||
if config.S3.SoftwareInstallersBucket != "" {
|
||||
if config.S3.BucketsAndPrefixesMatch() {
|
||||
level.Warn(logger).Log("msg",
|
||||
logger.WarnContext(ctx,
|
||||
"the S3 buckets and prefixes for carves and software installers appear to be identical, this can cause issues")
|
||||
}
|
||||
// Extract the CloudFront URL signer before creating the S3 stores.
|
||||
|
|
@ -959,20 +958,20 @@ the way that the Fleet server works.
|
|||
initFatal(err, "initializing S3 software installer store")
|
||||
}
|
||||
softwareInstallStore = store
|
||||
level.Info(logger).Log("msg", "using S3 software installer store", "bucket", config.S3.SoftwareInstallersBucket)
|
||||
logger.InfoContext(ctx, "using S3 software installer store", "bucket", config.S3.SoftwareInstallersBucket)
|
||||
|
||||
bstore, err := s3.NewBootstrapPackageStore(config.S3)
|
||||
if err != nil {
|
||||
initFatal(err, "initializing S3 bootstrap package store")
|
||||
}
|
||||
bootstrapPackageStore = bstore
|
||||
level.Info(logger).Log("msg", "using S3 bootstrap package store", "bucket", config.S3.SoftwareInstallersBucket)
|
||||
logger.InfoContext(ctx, "using S3 bootstrap package store", "bucket", config.S3.SoftwareInstallersBucket)
|
||||
|
||||
softwareTitleIconStore, err = s3.NewSoftwareTitleIconStore(config.S3)
|
||||
if err != nil {
|
||||
initFatal(err, "initializing S3 software title icon store")
|
||||
}
|
||||
level.Info(logger).Log("msg", "using S3 software title icon store", "bucket", config.S3.SoftwareInstallersBucket)
|
||||
logger.InfoContext(ctx, "using S3 software title icon store", "bucket", config.S3.SoftwareInstallersBucket)
|
||||
} else {
|
||||
installerDir := os.TempDir()
|
||||
if dir := os.Getenv("FLEET_SOFTWARE_INSTALLER_STORE_DIR"); dir != "" {
|
||||
|
|
@ -980,11 +979,11 @@ the way that the Fleet server works.
|
|||
}
|
||||
store, err := filesystem.NewSoftwareInstallerStore(installerDir)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("err", err, "msg", "failed to configure local filesystem software installer store")
|
||||
logger.ErrorContext(ctx, "failed to configure local filesystem software installer store", "err", err)
|
||||
softwareInstallStore = failing.NewFailingSoftwareInstallerStore()
|
||||
} else {
|
||||
softwareInstallStore = store
|
||||
level.Info(logger).Log("msg",
|
||||
logger.InfoContext(ctx,
|
||||
"using local filesystem software installer store, this is not suitable for production use", "directory",
|
||||
installerDir)
|
||||
}
|
||||
|
|
@@ -995,11 +994,11 @@ the way that the Fleet server works.
 	}
 	iconStore, err := filesystem.NewSoftwareTitleIconStore(iconDir)
 	if err != nil {
-		level.Error(logger).Log("err", err, "msg", "failed to configure local filesystem software title icon store")
+		logger.ErrorContext(ctx, "failed to configure local filesystem software title icon store", "err", err)
 		softwareTitleIconStore = failing.NewFailingSoftwareTitleIconStore()
 	} else {
 		softwareTitleIconStore = iconStore
-		level.Warn(logger).Log("msg",
+		logger.WarnContext(ctx,
 			"using local filesystem software title icon store, this is not suitable for production use", "directory",
 			iconDir)
 	}

@@ -1009,7 +1008,7 @@ the way that the Fleet server works.
 	svc, err = eeservice.NewService(
 		svc,
 		ds,
-		logger.SlogLogger(),
+		logger,
 		config,
 		mailService,
 		clock.C,

@@ -1036,10 +1035,10 @@ the way that the Fleet server works.
 	if err != nil {
 		initFatal(errors.New("Error generating random instance identifier"), "")
 	}
-	level.Info(logger).Log("instanceID", instanceID)
+	logger.InfoContext(ctx, "instance info", "instanceID", instanceID)

 	// Bootstrap activity bounded context (needed for cron schedules and HTTP routes)
-	activitySvc, activityRoutes := createActivityBoundedContext(svc, dbConns, logger.SlogLogger())
+	activitySvc, activityRoutes := createActivityBoundedContext(svc, dbConns, logger)
 	// Inject the activity bounded context into the main service and activity module
 	svc.SetActivityService(activitySvc)
 	activitiesModule.SetService(activitySvc)

@@ -1049,11 +1048,11 @@ the way that the Fleet server works.
 	// run or not (see https://github.com/fleetdm/fleet/issues/9486).
 	go func() {
 		cleanupCronStats := func() {
-			level.Debug(logger).Log("msg", "cleaning up cron_stats")
+			logger.DebugContext(ctx, "cleaning up cron_stats")
 			// Datastore.CleanupCronStats should be safe to run by multiple fleet
 			// instances at the same time and it should not be an expensive operation.
 			if err := ds.CleanupCronStats(ctx); err != nil {
-				level.Info(logger).Log("msg", "failed to clean up cron_stats", "err", err)
+				logger.InfoContext(ctx, "failed to clean up cron_stats", "err", err)
 			}
 		}

@@ -1142,10 +1141,10 @@ the way that the Fleet server works.
 	vulnerabilityScheduleDisabled := false
 	if config.Vulnerabilities.DisableSchedule {
 		vulnerabilityScheduleDisabled = true
-		level.Info(logger).Log("msg", "vulnerabilities schedule disabled via vulnerabilities.disable_schedule")
+		logger.InfoContext(ctx, "vulnerabilities schedule disabled via vulnerabilities.disable_schedule")
 	}
 	if config.Vulnerabilities.CurrentInstanceChecks == "no" || config.Vulnerabilities.CurrentInstanceChecks == "0" {
-		level.Info(logger).Log("msg", "vulnerabilities schedule disabled via vulnerabilities.current_instance_checks")
+		logger.InfoContext(ctx, "vulnerabilities schedule disabled via vulnerabilities.current_instance_checks")
 		vulnerabilityScheduleDisabled = true
 	}
 	if !vulnerabilityScheduleDisabled {

@@ -1330,10 +1329,10 @@ the way that the Fleet server works.
 		initFatal(err, "failed to register batch activity completion checker schedule")
 	}

-	level.Info(logger).Log("msg", fmt.Sprintf("started cron schedules: %s", strings.Join(cronSchedules.ScheduleNames(), ", ")))
+	logger.InfoContext(ctx, fmt.Sprintf("started cron schedules: %s", strings.Join(cronSchedules.ScheduleNames(), ", ")))

 	// StartCollectors starts a goroutine per collector, using ctx to cancel.
-	task.StartCollectors(ctx, logger.With("cron", "async_task").SlogLogger())
+	task.StartCollectors(ctx, logger.With("cron", "async_task"))

 	// Flush seen hosts every second
 	hostsAsyncCfg := config.Osquery.AsyncConfigForTask(configpkg.AsyncTaskHostLastSeen)

@@ -1341,10 +1340,7 @@ the way that the Fleet server works.
 	go func() {
 		for range time.Tick(time.Duration(rand.Intn(10)+1) * time.Second) {
 			if err := task.FlushHostsLastSeen(baseCtx, clock.C.Now()); err != nil {
-				level.Info(logger).Log(
-					"err", err,
-					"msg", "failed to update host seen times",
-				)
+				logger.InfoContext(ctx, "failed to update host seen times", "err", err)
 			}
 		}
 	}()
@@ -1375,7 +1371,7 @@ the way that the Fleet server works.

 	var httpSigVerifier func(http.Handler) http.Handler
 	if license.IsPremium() {
-		httpSigVerifier, err = httpsig.Middleware(ds, config.Auth.RequireHTTPMessageSignature, logger.With("component", "http-sig-verifier").SlogLogger())
+		httpSigVerifier, err = httpsig.Middleware(ds, config.Auth.RequireHTTPMessageSignature, logger.With("component", "http-sig-verifier"))
 		if err != nil {
 			initFatal(err, "initializing HTTP signature verifier")
 		}

@@ -1385,10 +1381,10 @@ the way that the Fleet server works.
 	{
 		frontendHandler = service.PrometheusMetricsHandler(
 			"get_frontend",
-			service.ServeFrontend(config.Server.URLPrefix, config.Server.SandboxEnabled, httpLogger.SlogLogger()),
+			service.ServeFrontend(config.Server.URLPrefix, config.Server.SandboxEnabled, httpLogger),
 		)

-		frontendHandler = service.WithMDMEnrollmentMiddleware(svc, httpLogger.SlogLogger(), frontendHandler)
+		frontendHandler = service.WithMDMEnrollmentMiddleware(svc, httpLogger, frontendHandler)

 		var extra []service.ExtraHandlerOption
 		if config.MDM.SSORateLimitPerMinute > 0 {

@@ -1396,7 +1392,7 @@ the way that the Fleet server works.
 		}
 		extra = append(extra, service.WithHTTPSigVerifier(httpSigVerifier))

-		apiHandler = service.MakeHandler(svc, config, httpLogger.SlogLogger(), limiterStore, redisPool, carveStore,
+		apiHandler = service.MakeHandler(svc, config, httpLogger, limiterStore, redisPool, carveStore,
 			[]endpointer.HandlerRoutesFunc{android_service.GetRoutes(svc, androidSvc), activityRoutes}, extra...)

 		setupRequired, err := svc.SetupRequired(baseCtx)

@@ -1407,17 +1403,17 @@ the way that the Fleet server works.
 		// By performing the same check inside main, we can make server startups
 		// more efficient after the first startup.
 		if setupRequired {
-			apiHandler = service.WithSetup(svc, logger.SlogLogger(), apiHandler)
-			frontendHandler = service.RedirectLoginToSetup(svc, logger.SlogLogger(), frontendHandler, config.Server.URLPrefix)
+			apiHandler = service.WithSetup(svc, logger, apiHandler)
+			frontendHandler = service.RedirectLoginToSetup(svc, logger, frontendHandler, config.Server.URLPrefix)
 		} else {
-			frontendHandler = service.RedirectSetupToLogin(svc, logger.SlogLogger(), frontendHandler, config.Server.URLPrefix)
+			frontendHandler = service.RedirectSetupToLogin(svc, logger, frontendHandler, config.Server.URLPrefix)
 		}

 		endUserEnrollOTAHandler = service.ServeEndUserEnrollOTA(
 			svc,
 			config.Server.URLPrefix,
 			ds,
-			logger.SlogLogger(),
+			logger,
 		)
 	}

@@ -1441,7 +1437,7 @@ the way that the Fleet server works.
 	}

 	// Instantiate a gRPC service to handle launcher requests.
-	launcher := launcher.New(svc, logger.SlogLogger(), grpc.NewServer(
+	launcher := launcher.New(svc, logger, grpc.NewServer(
 		grpc.ChainUnaryInterceptor(
 			grpc_recovery.UnaryServerInterceptor(),
 		),

@@ -1451,26 +1447,26 @@ the way that the Fleet server works.
 	), healthCheckers)

 	rootMux := http.NewServeMux()
-	rootMux.Handle("/healthz", service.PrometheusMetricsHandler("healthz", otelmw.WrapHandler(health.Handler(httpLogger.SlogLogger(), healthCheckers), "/healthz", config)))
+	rootMux.Handle("/healthz", service.PrometheusMetricsHandler("healthz", otelmw.WrapHandler(health.Handler(httpLogger, healthCheckers), "/healthz", config)))
 	rootMux.Handle("/version", service.PrometheusMetricsHandler("version", otelmw.WrapHandler(version.Handler(), "/version", config)))
 	rootMux.Handle("/assets/", service.PrometheusMetricsHandler("static_assets", otelmw.WrapHandlerDynamic(service.ServeStaticAssets("/assets/"), config)))

 	if len(config.Server.PrivateKey) > 0 {
 		commander := apple_mdm.NewMDMAppleCommander(mdmStorage, mdmPushService)
-		ddmService := service.NewMDMAppleDDMService(ds, logger.SlogLogger())
+		ddmService := service.NewMDMAppleDDMService(ds, logger)
 		vppInstaller := svc.(fleet.AppleMDMVPPInstaller)
 		mdmCheckinAndCommandService := service.NewMDMAppleCheckinAndCommandService(
 			ds,
 			commander,
 			vppInstaller,
 			license.IsPremium(),
-			logger.SlogLogger(),
+			logger,
 			redis_key_value.New(redisPool),
 			svc.NewActivity,
 		)

-		mdmCheckinAndCommandService.RegisterResultsHandler("InstalledApplicationList", service.NewInstalledApplicationListResultsHandler(ds, commander, logger.SlogLogger(), config.Server.VPPVerifyTimeout, config.Server.VPPVerifyRequestDelay, svc.NewActivity))
-		mdmCheckinAndCommandService.RegisterResultsHandler(fleet.DeviceLocationCmdName, service.NewDeviceLocationResultsHandler(ds, commander, logger.SlogLogger()))
+		mdmCheckinAndCommandService.RegisterResultsHandler("InstalledApplicationList", service.NewInstalledApplicationListResultsHandler(ds, commander, logger, config.Server.VPPVerifyTimeout, config.Server.VPPVerifyRequestDelay, svc.NewActivity))
+		mdmCheckinAndCommandService.RegisterResultsHandler(fleet.DeviceLocationCmdName, service.NewDeviceLocationResultsHandler(ds, commander, logger))

 		hasSCEPChallenge, err := checkMDMAssets([]fleet.MDMAssetName{fleet.MDMAssetSCEPChallenge})
 		if err != nil {

@@ -1494,7 +1490,7 @@ the way that the Fleet server works.
 				initFatal(err, "inserting SCEP challenge")
 			}

-			level.Warn(logger).Log("msg",
+			logger.WarnContext(ctx,
 				"Your server already has stored a SCEP challenge. Fleet will ignore this value provided via environment variables when this happens.")
 		}
 	}

@@ -1503,7 +1499,7 @@ the way that the Fleet server works.
 		config.MDM,
 		mdmStorage,
 		scepStorage,
-		logger.SlogLogger(),
+		logger,
 		mdmCheckinAndCommandService,
 		ddmService,
 		commander,
@@ -1516,37 +1512,37 @@ the way that the Fleet server works.

 	if license.IsPremium() {
 		// SCEP proxy (for NDES, etc.)
-		if err = service.RegisterSCEPProxy(rootMux, ds, logger.SlogLogger(), nil, &config); err != nil {
+		if err = service.RegisterSCEPProxy(rootMux, ds, logger, nil, &config); err != nil {
 			initFatal(err, "setup SCEP proxy")
 		}
-		if err = scim.RegisterSCIM(rootMux, ds, svc, logger.SlogLogger(), &config); err != nil {
+		if err = scim.RegisterSCIM(rootMux, ds, svc, logger, &config); err != nil {
 			initFatal(err, "setup SCIM")
 		}
 		// Host identity and conditional access SCEP features only work if a private key has been set up
 		if len(config.Server.PrivateKey) > 0 {
-			hostIdentitySCEPDepot, err := mds.NewHostIdentitySCEPDepot(logger.SlogLogger().With("component", "host-id-scep-depot"), &config)
+			hostIdentitySCEPDepot, err := mds.NewHostIdentitySCEPDepot(logger.With("component", "host-id-scep-depot"), &config)
 			if err != nil {
 				initFatal(err, "setup host identity SCEP depot")
 			}
-			if err = hostidentity.RegisterSCEP(rootMux, hostIdentitySCEPDepot, ds, logger.SlogLogger(), &config); err != nil {
+			if err = hostidentity.RegisterSCEP(rootMux, hostIdentitySCEPDepot, ds, logger, &config); err != nil {
 				initFatal(err, "setup host identity SCEP")
 			}

 			// Conditional Access SCEP
-			condAccessSCEPDepot, err := mds.NewConditionalAccessSCEPDepot(logger.SlogLogger().With("component", "conditional-access-scep-depot"), &config)
+			condAccessSCEPDepot, err := mds.NewConditionalAccessSCEPDepot(logger.With("component", "conditional-access-scep-depot"), &config)
 			if err != nil {
 				initFatal(err, "setup conditional access SCEP depot")
 			}
-			if err = condaccess.RegisterSCEP(ctx, rootMux, condAccessSCEPDepot, ds, logger.SlogLogger(), &config); err != nil {
+			if err = condaccess.RegisterSCEP(ctx, rootMux, condAccessSCEPDepot, ds, logger, &config); err != nil {
 				initFatal(err, "setup conditional access SCEP")
 			}

 			// Conditional Access IdP (Okta)
-			if err = condaccess.RegisterIdP(rootMux, ds, logger.SlogLogger(), &config); err != nil {
+			if err = condaccess.RegisterIdP(rootMux, ds, logger, &config); err != nil {
 				initFatal(err, "setup conditional access IdP")
 			}
 		} else {
-			level.Warn(logger).Log("msg",
+			logger.WarnContext(ctx,
 				"Host identity and conditional access SCEP is not available because no server private key has been set up.")
 		}
 	}
@@ -1559,10 +1555,10 @@ the way that the Fleet server works.
 	))
 	} else {
 		if config.Prometheus.BasicAuth.Disable {
-			level.Info(logger).Log("msg", "metrics endpoint enabled with http basic auth disabled")
+			logger.InfoContext(ctx, "metrics endpoint enabled with http basic auth disabled")
 			rootMux.Handle("/metrics", service.PrometheusMetricsHandler("metrics", otelmw.WrapHandler(promhttp.Handler(), "/metrics", config)))
 		} else {
-			level.Info(logger).Log("msg", "metrics endpoint disabled (http basic auth credentials not set)")
+			logger.InfoContext(ctx, "metrics endpoint disabled (http basic auth credentials not set)")
 		}
 	}

@@ -1584,8 +1580,8 @@ the way that the Fleet server works.
 			// add an additional 30 seconds to prevent race conditions where the
 			// request is terminated early.
 			if err := rc.SetWriteDeadline(time.Now().Add(scripts.MaxServerWaitTime + (30 * time.Second))); err != nil {
-				level.Error(logger).Log(
-					"msg", "http middleware failed to override endpoint write timeout for script sync run",
+				logger.ErrorContext(req.Context(),
+					"http middleware failed to override endpoint write timeout for script sync run",
 					"response_writer_type", fmt.Sprintf("%T", rw),
 					"response_writer", fmt.Sprintf("%+v", rw),
 					"err", err,

@@ -1608,8 +1604,8 @@ the way that the Fleet server works.
 			// TODO: Is this really how we want to handle this? Or would an arbitrarily long
 			// timeout be better?
 			if err := rc.SetReadDeadline(zeroTime); err != nil {
-				level.Error(logger).Log(
-					"msg", "http middleware failed to override endpoint read timeout for software package upload",
+				logger.ErrorContext(req.Context(),
+					"http middleware failed to override endpoint read timeout for software package upload",
 					"response_writer_type", fmt.Sprintf("%T", rw),
 					"response_writer", fmt.Sprintf("%+v", rw),
 					"err", err,

@@ -1622,8 +1618,8 @@ the way that the Fleet server works.
 			// TODO: Is this really how we want to handle this? Or would an arbitrarily long
 			// timeout be better?
 			if err := rc.SetWriteDeadline(zeroTime); err != nil {
-				level.Error(logger).Log(
-					"msg", "http middleware failed to override endpoint write timeout for software package upload",
+				logger.ErrorContext(req.Context(),
+					"http middleware failed to override endpoint write timeout for software package upload",
 					"response_writer_type", fmt.Sprintf("%T", rw),
 					"response_writer", fmt.Sprintf("%+v", rw),
 					"err", err,
@@ -1640,8 +1636,8 @@ the way that the Fleet server works.
 			// When enabling Android MDM, frontend UI will wait for the admin to finish the setup in Google.
 			rc := http.NewResponseController(rw)
 			if err := rc.SetWriteDeadline(time.Now().Add(30 * time.Minute)); err != nil {
-				level.Error(logger).Log(
-					"msg", "http middleware failed to override endpoint write timeout for android enterprise setup",
+				logger.ErrorContext(req.Context(),
+					"http middleware failed to override endpoint write timeout for android enterprise setup",
 					"response_writer_type", fmt.Sprintf("%T", rw),
 					"response_writer", fmt.Sprintf("%+v", rw),
 					"err", err,
@@ -1657,16 +1653,16 @@ the way that the Fleet server works.
 			// across a large number of hosts, so set the timeouts a bit higher than default
 			rc := http.NewResponseController(rw)
 			if err := rc.SetWriteDeadline(time.Now().Add(5 * time.Minute)); err != nil {
-				level.Error(logger).Log(
-					"msg", "http middleware failed to override endpoint write timeout for MDM profiles batch endpoint",
+				logger.ErrorContext(req.Context(),
+					"http middleware failed to override endpoint write timeout for MDM profiles batch endpoint",
 					"response_writer_type", fmt.Sprintf("%T", rw),
 					"response_writer", fmt.Sprintf("%+v", rw),
 					"err", err,
 				)
 			}
 			if err := rc.SetReadDeadline(time.Now().Add(5 * time.Minute)); err != nil {
-				level.Error(logger).Log(
-					"msg", "http middleware failed to override endpoint read timeout for MDM profiles batch endpoint",
+				logger.ErrorContext(req.Context(),
+					"http middleware failed to override endpoint read timeout for MDM profiles batch endpoint",
 					"response_writer_type", fmt.Sprintf("%T", rw),
 					"response_writer", fmt.Sprintf("%+v", rw),
 					"err", err,

@@ -1686,7 +1682,7 @@ the way that the Fleet server works.
 	rootMux.Handle("/", otelmw.WrapHandler(frontendHandler, "/", config))

 	debugHandler := &debugMux{
-		fleetAuthenticatedHandler: service.MakeDebugHandler(svc, config, logger.SlogLogger(), eh, ds),
+		fleetAuthenticatedHandler: service.MakeDebugHandler(svc, config, logger, eh, ds),
 	}
 	rootMux.Handle("/debug/", otelmw.WrapHandlerDynamic(debugHandler, config))

@@ -1715,7 +1711,7 @@ the way that the Fleet server works.
 	if v := os.Getenv("FLEET_LIVE_QUERY_REST_PERIOD"); v != "" {
 		duration, err := time.ParseDuration(v)
 		if err != nil {
-			level.Error(logger).Log("live_query_rest_period_err", err)
+			logger.ErrorContext(ctx, "failed to parse live query rest period", "err", err)
 		} else {
 			liveQueryRestPeriod = duration
 		}

@@ -1745,10 +1741,10 @@ the way that the Fleet server works.
 	errs := make(chan error, 2)
 	go func() {
 		if !config.Server.TLS {
-			logger.Log("transport", "http", "address", config.Server.Address, "msg", "listening")
+			logger.InfoContext(ctx, "listening", "transport", "http", "address", config.Server.Address)
 			errs <- srv.ListenAndServe()
 		} else {
-			logger.Log("transport", "https", "address", config.Server.Address, "msg", "listening")
+			logger.InfoContext(ctx, "listening", "transport", "https", "address", config.Server.Address)
 			srv.TLSConfig = getTLSConfig(config.Server.TLSProfile)
 			errs <- srv.ListenAndServeTLS(
 				config.Server.Cert,

@@ -1772,17 +1768,17 @@ the way that the Fleet server works.
 		// Flush any pending OTEL data before shutting down
 		if tracerProvider != nil {
 			if err := tracerProvider.Shutdown(ctx); err != nil {
-				level.Error(logger).Log("msg", "failed to shutdown OTEL tracer provider", "err", err)
+				logger.ErrorContext(ctx, "failed to shutdown OTEL tracer provider", "err", err)
 			}
 		}
 		if meterProvider != nil {
 			if err := meterProvider.Shutdown(ctx); err != nil {
-				level.Error(logger).Log("msg", "failed to shutdown OTEL meter provider", "err", err)
+				logger.ErrorContext(ctx, "failed to shutdown OTEL meter provider", "err", err)
 			}
 		}
 		if loggerProvider != nil {
 			if err := loggerProvider.Shutdown(ctx); err != nil {
-				level.Error(logger).Log("msg", "failed to shutdown OTEL logger provider", "err", err)
+				logger.ErrorContext(ctx, "failed to shutdown OTEL logger provider", "err", err)
 			}
 		}
 		return srv.Shutdown(ctx)

@@ -1790,7 +1786,7 @@ the way that the Fleet server works.
 	}()

 	// block on errs signal
-	logger.Log("terminated", <-errs)
+	logger.InfoContext(ctx, "terminated", "err", <-errs)
 	},
 }

@@ -1963,39 +1959,39 @@ func getTLSConfig(profile string) *tls.Config {
 type devSQLInterceptor struct {
 	sqlmw.NullInterceptor

-	logger *platform_logging.Logger
+	logger *slog.Logger
 }

 func (in *devSQLInterceptor) ConnQueryContext(ctx context.Context, conn driver.QueryerContext, query string, args []driver.NamedValue) (driver.Rows, error) {
 	start := time.Now()
 	rows, err := conn.QueryContext(ctx, query, args)
-	in.logQuery(start, query, args, err)
+	in.logQuery(ctx, start, query, args, err)
 	return rows, err
 }

 func (in *devSQLInterceptor) StmtQueryContext(ctx context.Context, stmt driver.StmtQueryContext, query string, args []driver.NamedValue) (driver.Rows, error) {
 	start := time.Now()
 	rows, err := stmt.QueryContext(ctx, args)
-	in.logQuery(start, query, args, err)
+	in.logQuery(ctx, start, query, args, err)
 	return rows, err
 }

 func (in *devSQLInterceptor) StmtExecContext(ctx context.Context, stmt driver.StmtExecContext, query string, args []driver.NamedValue) (driver.Result, error) {
 	start := time.Now()
 	result, err := stmt.ExecContext(ctx, args)
-	in.logQuery(start, query, args, err)
+	in.logQuery(ctx, start, query, args, err)
 	return result, err
 }

 var spaceRegex = regexp.MustCompile(`\s+`)

-func (in *devSQLInterceptor) logQuery(start time.Time, query string, args []driver.NamedValue, err error) {
-	logLevel := level.Debug
-	if err != nil {
-		logLevel = level.Error
-	}
+func (in *devSQLInterceptor) logQuery(ctx context.Context, start time.Time, query string, args []driver.NamedValue, err error) {
 	query = strings.TrimSpace(spaceRegex.ReplaceAllString(query, " "))
-	logLevel(in.logger).Log("duration", time.Since(start), "query", query, "args", argsToString(args), "err", err)
+	if err != nil {
+		in.logger.ErrorContext(ctx, "sql query", "duration", time.Since(start), "query", query, "args", argsToString(args), "err", err)
+	} else {
+		in.logger.DebugContext(ctx, "sql query", "duration", time.Since(start), "query", query, "args", argsToString(args))
+	}
 }

 func argsToString(args []driver.NamedValue) string {
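The old logQuery above picked a go-kit level function dynamically (level.Debug, upgraded to level.Error on failure); the slog version branches into ErrorContext/DebugContext instead. slog can also express the dynamic-level style directly via the generic Log method; a sketch under the same field layout (not what the commit does):

	lvl := slog.LevelDebug
	if err != nil {
		lvl = slog.LevelError
	}
	in.logger.Log(ctx, lvl, "sql query",
		"duration", time.Since(start), "query", query, "args", argsToString(args), "err", err)

One trade-off: the branchy version in the commit avoids emitting a nil "err" attribute on the happy path, which is a plausible reason to spell out both calls.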
@@ -2043,16 +2039,15 @@ func (n nopPusher) Push(context.Context, []string) (map[string]*push.Response, e
 	return nil, nil
 }

-func createTestBuckets(config *configpkg.FleetConfig, logger *platform_logging.Logger) {
+func createTestBuckets(ctx context.Context, config *configpkg.FleetConfig, logger *slog.Logger) {
 	softwareInstallerStore, err := s3.NewSoftwareInstallerStore(config.S3)
 	if err != nil {
 		initFatal(err, "initializing S3 software installer store")
 	}
-	if err := softwareInstallerStore.CreateTestBucket(context.Background(), config.S3.SoftwareInstallersBucket); err != nil {
+	if err := softwareInstallerStore.CreateTestBucket(ctx, config.S3.SoftwareInstallersBucket); err != nil {
 		// Don't panic, allow devs to run Fleet without S3 dependency.
-		level.Info(logger).Log(
+		logger.InfoContext(ctx, "failed to create test software installer bucket",
 			"err", err,
-			"msg", "failed to create test software installer bucket",
 			"name", config.S3.SoftwareInstallersBucket,
 		)
 	}

@@ -2060,11 +2055,10 @@ func createTestBuckets(config *configpkg.FleetConfig, logger *platform_logging.L
 	if err != nil {
 		initFatal(err, "initializing S3 carve store")
 	}
-	if err := carveStore.CreateTestBucket(context.Background(), config.S3.CarvesBucket); err != nil {
+	if err := carveStore.CreateTestBucket(ctx, config.S3.CarvesBucket); err != nil {
 		// Don't panic, allow devs to run Fleet without S3 dependency.
-		level.Info(logger).Log(
+		logger.InfoContext(ctx, "failed to create test carve bucket",
 			"err", err,
-			"msg", "failed to create test carve bucket",
 			"name", config.S3.CarvesBucket,
 		)
 	}

@@ -25,7 +25,6 @@ import (
 	apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
 	"github.com/fleetdm/fleet/v4/server/mdm/nanodep/tokenpki"
 	"github.com/fleetdm/fleet/v4/server/mock"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"github.com/fleetdm/fleet/v4/server/ptr"
 	"github.com/fleetdm/fleet/v4/server/service"
 	"github.com/fleetdm/fleet/v4/server/service/schedule"
@@ -298,7 +297,7 @@ func TestAutomationsSchedule(t *testing.T) {
 	defer cancelFunc()

 	failingPoliciesSet := service.NewMemFailingPolicySet()
-	s, err := newAutomationsSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), 5*time.Minute, failingPoliciesSet)
+	s, err := newAutomationsSchedule(ctx, "test_instance", ds, slog.New(slog.DiscardHandler), 5*time.Minute, failingPoliciesSet)
 	require.NoError(t, err)
 	s.Start()
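The test updates in this and the following hunks are a mechanical mapping from the removed logging helpers to stock slog constructors; roughly (each line is a standalone sketch):

	slog.New(slog.DiscardHandler)                 // was logging.NewNopLogger(); slog.DiscardHandler needs Go 1.24+
	slog.New(slog.NewTextHandler(os.Stdout, nil)) // was logging.NewLogfmtLogger(os.Stdout)
	slog.New(slog.NewJSONHandler(os.Stdout, nil)) // was logging.NewJSONLogger(os.Stdout)

On earlier Go versions, slog.New(slog.NewTextHandler(io.Discard, nil)) is the usual stand-in for the discard case.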
@@ -358,7 +357,7 @@ func TestCronVulnerabilitiesCreatesDatabasesPath(t *testing.T) {
 	// Use schedule to test that the schedule does indeed call cronVulnerabilities.
 	ctx = license.NewContext(ctx, &fleet.LicenseInfo{Tier: fleet.TierPremium})
 	ctx, cancel := context.WithCancel(ctx)
-	lg := logging.NewNopLogger()
+	lg := slog.New(slog.DiscardHandler)

 	go func() {
 		defer func() {

@@ -419,7 +418,7 @@ func (f *softwareIterator) Close() error { return nil }
 func TestScanVulnerabilities(t *testing.T) {
 	nettest.Run(t)

-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	ctx := context.Background()

@@ -610,7 +609,7 @@ func TestScanVulnerabilities(t *testing.T) {
 func TestScanVulnerabilitiesFreeTier(t *testing.T) {
 	nettest.Run(t)

-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	ctx := context.Background()

@@ -778,7 +777,7 @@ func TestScanVulnerabilitiesFreeTier(t *testing.T) {
 }

 func TestUpdateVulnHostCounts(t *testing.T) {
-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	ctx := context.Background()

@@ -822,7 +821,7 @@ func TestUpdateVulnHostCounts(t *testing.T) {
 }

 func TestScanVulnerabilitiesMkdirFailsIfVulnPathIsFile(t *testing.T) {
-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	ctx, cancelFunc := context.WithCancel(context.Background())
 	defer cancelFunc()

@@ -899,7 +898,7 @@ func TestCronVulnerabilitiesSkipMkdirIfDisabled(t *testing.T) {
 	// Use schedule to test that the schedule does indeed call cronVulnerabilities.
 	ctx = license.NewContext(ctx, &fleet.LicenseInfo{Tier: fleet.TierPremium})
 	ctx, cancel := context.WithCancel(ctx)
-	s, err := newVulnerabilitiesSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), &config)
+	s, err := newVulnerabilitiesSchedule(ctx, "test_instance", ds, slog.New(slog.DiscardHandler), &config)
 	require.NoError(t, err)
 	s.Start()
 	t.Cleanup(func() {

@@ -984,7 +983,7 @@ func TestAutomationsScheduleLockDuration(t *testing.T) {
 	ctx, cancelFunc := context.WithCancel(context.Background())
 	defer cancelFunc()

-	s, err := newAutomationsSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), 1*time.Second, service.NewMemFailingPolicySet())
+	s, err := newAutomationsSchedule(ctx, "test_instance", ds, slog.New(slog.DiscardHandler), 1*time.Second, service.NewMemFailingPolicySet())
 	require.NoError(t, err)
 	s.Start()

@@ -1051,7 +1050,7 @@ func TestAutomationsScheduleIntervalChange(t *testing.T) {
 	ctx, cancelFunc := context.WithCancel(context.Background())
 	defer cancelFunc()

-	s, err := newAutomationsSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), 200*time.Millisecond, service.NewMemFailingPolicySet())
+	s, err := newAutomationsSchedule(ctx, "test_instance", ds, slog.New(slog.DiscardHandler), 200*time.Millisecond, service.NewMemFailingPolicySet())
 	require.NoError(t, err)
 	s.Start()

@@ -1198,7 +1197,7 @@ func TestDebugMux(t *testing.T) {
 func TestVerifyDiskEncryptionKeysJob(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := logging.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	testCert, testKey, err := apple_mdm.NewSCEPCACertKey()
 	require.NoError(t, err)
@@ -4,17 +4,16 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"os"
 	"time"

-	"github.com/fleetdm/fleet/v4/server/contexts/license"
-	"github.com/fleetdm/fleet/v4/server/dev_mode"
-
 	"github.com/WatchBeam/clock"
 	"github.com/fleetdm/fleet/v4/server/config"
+	"github.com/fleetdm/fleet/v4/server/contexts/license"
 	"github.com/fleetdm/fleet/v4/server/datastore/mysql"
+	"github.com/fleetdm/fleet/v4/server/dev_mode"
 	"github.com/fleetdm/fleet/v4/server/fleet"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"github.com/spf13/cobra"
 )

@@ -50,7 +49,7 @@ by an exit code of zero.`,
 	fleet.WriteExpiredLicenseBanner(os.Stderr)
 }

-ds, err := mysql.New(cfg.Mysql, clock.C, mysql.Logger(logger.SlogLogger()))
+ds, err := mysql.New(cfg.Mysql, clock.C, mysql.Logger(logger))
 if err != nil {
 	return err
 }

@@ -134,7 +133,7 @@ by an exit code of zero.`,
 	return vulnProcessingCmd
 }

-func configureVulnPath(ctx context.Context, vulnConfig config.VulnerabilitiesConfig, appConfig *fleet.AppConfig, logger *logging.Logger) (vulnPath string) {
+func configureVulnPath(ctx context.Context, vulnConfig config.VulnerabilitiesConfig, appConfig *fleet.AppConfig, logger *slog.Logger) (vulnPath string) {
 	switch {
 	case vulnConfig.DatabasesPath != "" && appConfig != nil && appConfig.VulnerabilitySettings.DatabasesPath != "":
 		vulnPath = vulnConfig.DatabasesPath

@@ -156,7 +155,7 @@ type NamedVulnFunc struct {
 	VulnFunc func(ctx context.Context) error
 }

-func getVulnFuncs(ds fleet.Datastore, logger *logging.Logger, config *config.VulnerabilitiesConfig) []NamedVulnFunc {
+func getVulnFuncs(ds fleet.Datastore, logger *slog.Logger, config *config.VulnerabilitiesConfig) []NamedVulnFunc {
 	vulnFuncs := []NamedVulnFunc{
 		{
 			Name: "cron_vulnerabilities",
@@ -5,6 +5,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"log/slog"
 	"slices"
 	"strconv"
 	"strings"

@@ -13,7 +14,6 @@ import (

 	"github.com/fleetdm/fleet/v4/server/config"
 	"github.com/fleetdm/fleet/v4/server/fleet"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"github.com/fleetdm/fleet/v4/server/ptr"
 	"github.com/fleetdm/fleet/v4/server/service/calendar"
 	"github.com/fleetdm/fleet/v4/server/service/schedule"

@@ -31,7 +31,7 @@ func NewCalendarSchedule(
 	ds fleet.Datastore,
 	distributedLock fleet.Lock,
 	serverConfig config.CalendarConfig,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*schedule.Schedule, error) {
 	const (
 		name = string(fleet.CronCalendar)

@@ -59,7 +59,7 @@ func NewCalendarSchedule(
 }

 func cronCalendarEvents(ctx context.Context, ds fleet.Datastore, distributedLock fleet.Lock, serverConfig config.CalendarConfig,
-	logger *logging.Logger) error {
+	logger *slog.Logger) error {
 	appConfig, err := ds.AppConfig(ctx)
 	if err != nil {
 		return fmt.Errorf("load app config: %w", err)

@@ -106,7 +106,7 @@ func cronCalendarEventsForTeam(
 	team fleet.Team,
 	orgName string,
 	domain string,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) error {
 	if team.Config.Integrations.GoogleCalendar == nil ||
 		!team.Config.Integrations.GoogleCalendar.Enable {

@@ -203,7 +203,7 @@ func processCalendarFailingHosts(
 	calendarConfig *calendar.Config,
 	orgName string,
 	hosts []fleet.HostPolicyMembershipData,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) {
 	hosts = filterHostsWithSameEmail(hosts)

@@ -247,7 +247,7 @@ func processCalendarFailingHosts(
 			}
 		}

-		userCalendar := calendar.CreateUserCalendarFromConfig(ctx, calendarConfig, logger.SlogLogger())
+		userCalendar := calendar.CreateUserCalendarFromConfig(ctx, calendarConfig, logger)
 		if err := userCalendar.Configure(host.Email); err != nil {
 			logger.ErrorContext(ctx, "configure user calendar", "err", err)
 			continue // continue with next host

@@ -315,7 +315,7 @@ func processFailingHostExistingCalendarEvent(
 	host fleet.HostPolicyMembershipData,
 	policyIDtoPolicy *sync.Map,
 	calendarConfig *calendar.Config,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) error {

 	// Try to acquire the lock. Lock is needed to ensure calendar callback is not processed for this event at the same time.

@@ -394,7 +394,7 @@ func processFailingHostExistingCalendarEvent(
 	var newETag string
 	var genBodyFn fleet.CalendarGenBodyFn = func(conflict bool) (string, bool, error) {
 		var body string
-		body, generatedTag = calendar.GenerateCalendarEventBody(ctx, ds, orgName, host, policyIDtoPolicy, conflict, logger.SlogLogger())
+		body, generatedTag = calendar.GenerateCalendarEventBody(ctx, ds, orgName, host, policyIDtoPolicy, conflict, logger)
 		return body, true, nil
 	}

@@ -507,7 +507,7 @@ func processFailingHostExistingCalendarEvent(
 }

 func getBodyTag(ctx context.Context, ds fleet.Datastore, host fleet.HostPolicyMembershipData, policyIDtoPolicy *sync.Map,
-	logger *logging.Logger) string {
+	logger *slog.Logger) string {
 	var updatedBodyTag string
 	policyIDs := strings.Split(host.FailingPolicyIDs, ",")
 	if len(policyIDs) == 1 && policyIDs[0] != "" {

@@ -581,7 +581,7 @@ func processFailingHostCreateCalendarEvent(
 	orgName string,
 	host fleet.HostPolicyMembershipData,
 	policyIDtoPolicy *sync.Map,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) error {
 	calendarEvent, err := attemptCreatingEventOnUserCalendar(ctx, ds, orgName, host, userCalendar, policyIDtoPolicy, logger)
 	if err != nil {

@@ -603,7 +603,7 @@ func attemptCreatingEventOnUserCalendar(
 	host fleet.HostPolicyMembershipData,
 	userCalendar fleet.UserCalendar,
 	policyIDtoPolicy *sync.Map,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) (*fleet.CalendarEvent, error) {
 	year, month, today := time.Now().Date()
 	preferredDate := getPreferredCalendarEventDate(year, month, today)

@@ -612,7 +612,7 @@ func attemptCreatingEventOnUserCalendar(
 	calendarEvent, err := userCalendar.CreateEvent(
 		preferredDate, func(conflict bool) (string, bool, error) {
 			var body string
-			body, generatedTag = calendar.GenerateCalendarEventBody(ctx, ds, orgName, host, policyIDtoPolicy, conflict, logger.SlogLogger())
+			body, generatedTag = calendar.GenerateCalendarEventBody(ctx, ds, orgName, host, policyIDtoPolicy, conflict, logger)
 			return body, true, nil
 		}, fleet.CalendarCreateEventOpts{},
 	)

@@ -663,7 +663,7 @@ func removeCalendarEventsFromPassingHosts(
 	ds fleet.Datastore,
 	calendarConfig *calendar.Config,
 	hosts []fleet.HostPolicyMembershipData,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) {
 	hostIDsByEmail := make(map[string][]uint)
 	for _, host := range hosts {

@@ -706,7 +706,7 @@ func removeCalendarEventsFromPassingHosts(
 			logger.ErrorContext(ctx, "get calendar event from DB", "err", err)
 			continue
 		}
-		userCalendar := calendar.CreateUserCalendarFromConfig(ctx, calendarConfig, logger.SlogLogger())
+		userCalendar := calendar.CreateUserCalendarFromConfig(ctx, calendarConfig, logger)
 		if err := deleteCalendarEvent(ctx, ds, userCalendar, calendarEvent); err != nil {
 			logger.ErrorContext(ctx, "delete user calendar event", "err", err)
 			continue

@@ -727,7 +727,7 @@ func logHostsWithoutAssociatedEmail(
 	ctx context.Context,
 	domain string,
 	hosts []fleet.HostPolicyMembershipData,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) {
 	if len(hosts) == 0 {
 		return

@@ -763,7 +763,7 @@ func isHostOnline(ctx context.Context, ds fleet.Datastore, hostID uint) (bool, e
 	}
 }

-func cronCalendarEventsCleanup(ctx context.Context, ds fleet.Datastore, logger *logging.Logger) error {
+func cronCalendarEventsCleanup(ctx context.Context, ds fleet.Datastore, logger *slog.Logger) error {
 	appConfig, err := ds.AppConfig(ctx)
 	if err != nil {
 		return fmt.Errorf("load app config: %w", err)

@@ -776,7 +776,7 @@ func cronCalendarEventsCleanup(ctx context.Context, ds fleet.Datastore, logger *
 		GoogleCalendarIntegration: *appConfig.Integrations.GoogleCalendar[0],
 		ServerURL: appConfig.ServerSettings.ServerURL,
 	}
-	userCalendar = calendar.CreateUserCalendarFromConfig(ctx, calConfig, logger.SlogLogger())
+	userCalendar = calendar.CreateUserCalendarFromConfig(ctx, calConfig, logger)
 }

 // If global setting is disabled, we remove all calendar events from the DB

@@ -828,7 +828,7 @@ func deleteAllCalendarEvents(
 	ds fleet.Datastore,
 	calendarConfig *calendar.Config,
 	teamID *uint,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) error {
 	calendarEvents, err := ds.ListCalendarEvents(ctx, teamID)
 	if err != nil {

@@ -840,7 +840,7 @@ func deleteAllCalendarEvents(

 func deleteCalendarEventsInParallel(
 	ctx context.Context, ds fleet.Datastore, calendarConfig *calendar.Config, calendarEvents []*fleet.CalendarEvent,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) {
 	if len(calendarEvents) > 0 {
 		calendarEventCh := make(chan *fleet.CalendarEvent)

@@ -852,7 +852,7 @@ func deleteCalendarEventsInParallel(
 			for calEvent := range calendarEventCh {
 				var userCalendar fleet.UserCalendar
 				if calendarConfig != nil {
-					userCalendar = calendar.CreateUserCalendarFromConfig(ctx, calendarConfig, logger.SlogLogger())
+					userCalendar = calendar.CreateUserCalendarFromConfig(ctx, calendarConfig, logger)
 				}
 				if err := deleteCalendarEvent(ctx, ds, userCalendar, calEvent); err != nil {
 					logger.ErrorContext(ctx, "delete user calendar event", "err", err)

@@ -874,7 +874,7 @@ func cleanupTeamCalendarEvents(
 	ds fleet.Datastore,
 	calendarConfig *calendar.Config,
 	team fleet.Team,
-	logger *logging.Logger,
+	logger *slog.Logger,
 ) error {
 	teamFeatureEnabled := team.Config.Integrations.GoogleCalendar != nil && team.Config.Integrations.GoogleCalendar.Enable
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"os"
 	"strconv"
 	"strings"

@@ -16,7 +17,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
 	"github.com/fleetdm/fleet/v4/server/fleet"
 	"github.com/fleetdm/fleet/v4/server/mock"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"github.com/fleetdm/fleet/v4/server/ptr"
 	"github.com/fleetdm/fleet/v4/server/service/redis_lock"
 	"github.com/stretchr/testify/assert"

@@ -129,7 +129,7 @@ func TestEventForDifferentHost(t *testing.T) {
 	t.Parallel()
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := logging.NewLogfmtLogger(os.Stdout)
+	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
 	ds.AppConfigFunc = func(ctx context.Context) (*fleet.AppConfig, error) {
 		return &fleet.AppConfig{
 			Integrations: fleet.Integrations{

@@ -208,7 +208,7 @@ func TestEventForDifferentHost(t *testing.T) {
 func TestCalendarEventsMultipleHosts(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := logging.NewLogfmtLogger(os.Stdout)
+	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
 	t.Cleanup(func() {
 		calendar.ClearMockEvents()
 		calendar.ClearMockChannels()

@@ -403,11 +403,11 @@ func (n notFoundErr) Error() string {
 func TestCalendarEvents1KHosts(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	var logger *logging.Logger
+	var logger *slog.Logger
 	if os.Getenv("CALENDAR_TEST_LOGGING") != "" {
-		logger = logging.NewLogfmtLogger(os.Stdout)
+		logger = slog.New(slog.NewTextHandler(os.Stdout, nil))
 	} else {
-		logger = logging.NewNopLogger()
+		logger = slog.New(slog.DiscardHandler)
 	}
 	t.Cleanup(func() {
 		calendar.ClearMockEvents()

@@ -715,7 +715,7 @@ func TestCalendarEvents1KHosts(t *testing.T) {
 func TestEventBody(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := logging.NewLogfmtLogger(os.Stdout)
+	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
 	t.Cleanup(
 		func() {
 			calendar.ClearMockEvents()
@@ -191,7 +191,7 @@ func (ds *Datastore) MarkSessionAccessed(ctx context.Context, session *fleet.Ses
 	results, err := ds.writer(ctx).ExecContext(ctx, sqlStatement, ds.clock.Now(), session.ID)
 	if err != nil {
 		if common_mysql.IsReadOnlyError(err) {
-			common_mysql.TriggerFatalError(err)
+			common_mysql.TriggerFatalError(ctx, err)
 		}
 		return ctxerr.Wrap(ctx, err, "updating mark session as accessed")
 	}
@@ -183,7 +183,7 @@ func WithTxx(ctx context.Context, db *sqlx.DB, fn TxFn, logger *slog.Logger) err
 			return ctxerr.Wrapf(ctx, err, "got err '%s' rolling back after err", rbErr.Error())
 		}
 		if IsReadOnlyError(err) {
-			TriggerFatalError(err)
+			TriggerFatalError(ctx, err)
 		}
 		return err
 	}

@@ -191,7 +191,7 @@ func WithTxx(ctx context.Context, db *sqlx.DB, fn TxFn, logger *slog.Logger) err
 	if err := tx.Commit(); err != nil {
 		err = ctxerr.Wrap(ctx, err, "commit transaction")
 		if IsReadOnlyError(err) {
-			TriggerFatalError(err)
+			TriggerFatalError(ctx, err)
 		}
 		return err
 	}
@@ -18,7 +18,7 @@ import (

 var (
 	fatalErrorMu      sync.RWMutex
-	fatalErrorHandler func(error)
+	fatalErrorHandler func(context.Context, error)
 	fatalErrorOnce    sync.Once
 )

@@ -28,7 +28,7 @@ var (
 // process shutdown.
 //
 // If no handler is set, the default behavior is to panic.
-func SetFatalErrorHandler(fn func(error)) {
+func SetFatalErrorHandler(fn func(context.Context, error)) {
 	fatalErrorMu.Lock()
 	defer fatalErrorMu.Unlock()
 	fatalErrorHandler = fn

@@ -37,7 +37,7 @@ func SetFatalErrorHandler(fn func(error)) {

 // TriggerFatalError calls the registered fatal error handler exactly once.
 // If no handler is registered, it panics (legacy behavior).
-func TriggerFatalError(err error) {
+func TriggerFatalError(ctx context.Context, err error) {
 	fatalErrorMu.RLock()
 	defer fatalErrorMu.RUnlock()

@@ -46,7 +46,7 @@ func TriggerFatalError(err error) {
 	}

 	fatalErrorOnce.Do(func() {
-		fatalErrorHandler(err)
+		fatalErrorHandler(ctx, err)
 	})
 }
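SetFatalErrorHandler and TriggerFatalError now carry a context so the shutdown path can log with request and trace metadata. A sketch of registering a handler at startup (the handler body and cancelFunc are illustrative; the commit's actual wiring lives elsewhere in the tree):

	common_mysql.SetFatalErrorHandler(func(ctx context.Context, err error) {
		logger.ErrorContext(ctx, "fatal database error, beginning graceful shutdown", "err", err)
		cancelFunc() // cancel the server's base context to start shutdown
	})

Because TriggerFatalError guards the call with fatalErrorOnce, concurrent read-only failures during a failover collapse into a single handler invocation.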
@@ -84,7 +84,7 @@ func WithRetryTxx(ctx context.Context, db *sqlx.DB, fn TxFn, logger *slog.Logger
 			// Read-only errors indicate a DB failover occurred (primary demoted to reader).
 			// Trigger graceful shutdown so the orchestrator restarts and reconnects to the new primary.
 			if IsReadOnlyError(err) {
-				TriggerFatalError(err)
+				TriggerFatalError(ctx, err)
 				return backoff.Permanent(err)
 			}

@@ -100,7 +100,7 @@ func WithRetryTxx(ctx context.Context, db *sqlx.DB, fn TxFn, logger *slog.Logger
 			err = ctxerr.Wrap(ctx, err, "commit transaction")

 			if IsReadOnlyError(err) {
-				TriggerFatalError(err)
+				TriggerFatalError(ctx, err)
 				return backoff.Permanent(err)
 			}

@@ -1,6 +1,7 @@
 package mysql

 import (
+	"context"
 	"errors"
 	"log/slog"
 	"sync"

@@ -22,14 +23,14 @@ func readOnlyErr() error {
 func TestTriggerFatalErrorCallsHandler(t *testing.T) {
 	var called atomic.Bool
 	var capturedErr atomic.Value
-	SetFatalErrorHandler(func(err error) {
+	SetFatalErrorHandler(func(_ context.Context, err error) {
 		called.Store(true)
 		capturedErr.Store(err)
 	})
 	t.Cleanup(func() { SetFatalErrorHandler(nil) })

 	testErr := errors.New("test read-only error")
-	TriggerFatalError(testErr)
+	TriggerFatalError(t.Context(), testErr)

 	assert.True(t, called.Load())
 	assert.Equal(t, testErr, capturedErr.Load())
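These tests use t.Context() (new in Go 1.24), which returns a per-test context that is canceled as the test finishes, so nothing triggered through it can outlive the test. Shape of the pattern (TestSomething is illustrative):

	func TestSomething(t *testing.T) {
		ctx := t.Context() // canceled automatically when the test ends
		TriggerFatalError(ctx, errors.New("read-only"))
	}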
@@ -39,13 +40,13 @@ func TestTriggerFatalErrorPanicsWithoutHandler(t *testing.T) {
 	SetFatalErrorHandler(nil)

 	assert.Panics(t, func() {
-		TriggerFatalError(errors.New("read-only"))
+		TriggerFatalError(t.Context(), errors.New("read-only"))
 	})
 }

 func TestTriggerFatalErrorFiresOnce(t *testing.T) {
 	var callCount atomic.Int32
-	SetFatalErrorHandler(func(_ error) {
+	SetFatalErrorHandler(func(_ context.Context, _ error) {
 		callCount.Add(1)
 	})
 	t.Cleanup(func() { SetFatalErrorHandler(nil) })

@@ -53,7 +54,7 @@ func TestTriggerFatalErrorFiresOnce(t *testing.T) {
 	var wg sync.WaitGroup
 	for range 100 {
 		wg.Go(func() {
-			TriggerFatalError(errors.New("read-only"))
+			TriggerFatalError(t.Context(), errors.New("read-only"))
 		})
 	}
 	wg.Wait()

@@ -120,7 +121,7 @@ func TestTransactionReadOnlyTriggersFatalError(t *testing.T) {
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
 			var handlerCalled atomic.Bool
-			SetFatalErrorHandler(func(_ error) {
+			SetFatalErrorHandler(func(_ context.Context, _ error) {
 				handlerCalled.Store(true)
 			})
 			t.Cleanup(func() { SetFatalErrorHandler(nil) })
@@ -121,9 +121,9 @@ func (s *integrationEnterpriseTestSuite) SetupSuite() {
 		return func() (fleet.CronSchedule, error) {
 			// We set 24-hour interval so that it only runs when triggered.
 			var err error
-			cronLog := logging.NewJSONLogger(os.Stdout)
+			cronLog := slog.New(slog.NewJSONHandler(os.Stdout, nil))
 			if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
-				cronLog = logging.NewNopLogger()
+				cronLog = slog.New(slog.DiscardHandler)
 			}
 			calendarSchedule, err = cron.NewCalendarSchedule(
 				ctx, s.T().Name(), s.ds, redis_lock.NewLock(s.redisPool), config.CalendarConfig{Periodicity: 24 * time.Hour},
@@ -274,9 +274,9 @@ func (s *integrationMDMTestSuite) SetupSuite() {
 	var profileSchedule *schedule.Schedule
 	var cleanupsSchedule *schedule.Schedule
 	var androidProfileSchedule *schedule.Schedule
-	cronLog := logging.NewJSONLogger(os.Stdout)
+	cronLog := slog.New(slog.NewTextHandler(os.Stdout, nil))
 	if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
-		cronLog = logging.NewNopLogger()
+		cronLog = slog.New(slog.DiscardHandler)
 	}
 	serverLogger := logging.NewJSONLogger(os.Stdout)
 	if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {

@@ -331,41 +331,41 @@ func (s *integrationMDMTestSuite) SetupSuite() {
 				ctx, name, s.T().Name(), 1*time.Hour, ds, ds,
 				schedule.WithLogger(logger),
 				schedule.WithJob("manage_apple_profiles", func(ctx context.Context) error {
-					logger.Log("msg", "Starting manage_apple_profiles job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
+					logger.InfoContext(ctx, "Starting manage_apple_profiles job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
 					if s.onProfileJobDone != nil {
 						defer func() {
-							logger.Log("msg", "Completing manage_apple_profiles job", "test", s.T().Name(), "time",
+							logger.InfoContext(ctx, "Completing manage_apple_profiles job", "test", s.T().Name(), "time",
 								time.Now().Format(time.RFC3339))
 							s.onProfileJobDone()
 						}()
 					}
-					err = ReconcileAppleProfiles(ctx, ds, mdmCommander, logger.SlogLogger())
+					err = ReconcileAppleProfiles(ctx, ds, mdmCommander, logger)
 					require.NoError(s.T(), err)
 					return err
 				}),
 				schedule.WithJob("manage_apple_declarations", func(ctx context.Context) error {
-					logger.Log("msg", "Starting manage_apple_declarations job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
+					logger.InfoContext(ctx, "Starting manage_apple_declarations job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
 					if s.onProfileJobDone != nil {
 						defer func() {
-							logger.Log("msg", "Completing manage_apple_declarations job", "test", s.T().Name(), "time",
+							logger.InfoContext(ctx, "Completing manage_apple_declarations job", "test", s.T().Name(), "time",
 								time.Now().Format(time.RFC3339))
 							s.onProfileJobDone()
 						}()
 					}
-					err = ReconcileAppleDeclarations(ctx, ds, mdmCommander, logger.SlogLogger())
+					err = ReconcileAppleDeclarations(ctx, ds, mdmCommander, logger)
 					require.NoError(s.T(), err)
 					return err
 				}),
 				schedule.WithJob("manage_windows_profiles", func(ctx context.Context) error {
-					logger.Log("msg", "Starting manage_windows_profiles job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
+					logger.InfoContext(ctx, "Starting manage_windows_profiles job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
 					if s.onProfileJobDone != nil {
 						defer func() {
-							logger.Log("msg", "Completing manage_windows_profiles job", "test", s.T().Name(), "time",
+							logger.InfoContext(ctx, "Completing manage_windows_profiles job", "test", s.T().Name(), "time",
 								time.Now().Format(time.RFC3339))
 							s.onProfileJobDone()
 						}()
 					}
-					err := ReconcileWindowsProfiles(ctx, ds, logger.SlogLogger())
+					err := ReconcileWindowsProfiles(ctx, ds, logger)
 					require.NoError(s.T(), err)
 					return err
 				}),

@@ -394,7 +394,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
 					defer s.onIntegrationsScheduleDone()
 				}

-				return worker.ProcessDEPCooldowns(ctx, ds, logger.SlogLogger())
+				return worker.ProcessDEPCooldowns(ctx, ds, logger)
 			}),
 		)
 		return integrationsSchedule, nil

@@ -414,7 +414,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
 				if s.onCleanupScheduleDone != nil {
 					defer s.onCleanupScheduleDone()
 				}
-				return android_service.RenewCertificateTemplates(ctx, ds, logger.SlogLogger())
+				return android_service.RenewCertificateTemplates(ctx, ds, logger)
 			}),
 		)
 		return cleanupsSchedule, nil

@@ -428,15 +428,15 @@ func (s *integrationMDMTestSuite) SetupSuite() {
 			ctx, name, s.T().Name(), 1*time.Hour, ds, ds,
 			schedule.WithLogger(logger),
 			schedule.WithJob("manage_android_profiles", func(ctx context.Context) error {
-				logger.Log("msg", "Starting manage_android_profiles job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
+				logger.InfoContext(ctx, "Starting manage_android_profiles job", "test", s.T().Name(), "time", time.Now().Format(time.RFC3339))
 				if s.onAndroidProfileJobDone != nil {
 					defer func() {
-						logger.Log("msg", "Completing manage_android_profiles job", "test", s.T().Name(), "time",
+						logger.InfoContext(ctx, "Completing manage_android_profiles job", "test", s.T().Name(), "time",
 							time.Now().Format(time.RFC3339))
 						s.onAndroidProfileJobDone()
 					}()
 				}
-				err := android_service.ReconcileProfilesWithClient(ctx, ds, logger.SlogLogger(), "", androidMockClient, config.AndroidAgentConfig{
+				err := android_service.ReconcileProfilesWithClient(ctx, ds, logger, "", androidMockClient, config.AndroidAgentConfig{
 					Package: "com.fleetdm.agent",
 					SigningSHA256: "abc123def456",
 				})

@@ -455,7 +455,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
 			ctx, name, s.T().Name(), 1*time.Hour, ds, ds,
 			schedule.WithLogger(logger),
 			schedule.WithJob("cron_iphone_ipad_refetcher", func(ctx context.Context) error {
-				return apple_mdm.IOSiPadOSRefetch(ctx, ds, mdmCommander, logger.SlogLogger(), s.fleetSvc.NewActivity)
+				return apple_mdm.IOSiPadOSRefetch(ctx, ds, mdmCommander, logger, s.fleetSvc.NewActivity)
 			}),
 		)
 		return refetcherSchedule, nil

@@ -11661,8 +11661,8 @@ func (s *integrationMDMTestSuite) TestSilentMigrationGotchas() {
 	require.NoError(t, err)
 	fleetCfg := config.TestConfig()
 	config.SetTestMDMConfig(s.T(), &fleetCfg, cert, key, "")
-	logger := logging.NewJSONLogger(os.Stdout)
-	err = RenewSCEPCertificates(ctx, logger.SlogLogger(), s.ds, &fleetCfg, s.mdmCommander)
+	scepLogger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
+	err = RenewSCEPCertificates(ctx, scepLogger, s.ds, &fleetCfg, s.mdmCommander)
 	require.NoError(t, err)

 	// no new commands were enqueued
@@ -7,6 +7,7 @@ package schedule
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"os"
 	"runtime/debug"
 	"sync"

@@ -14,7 +15,6 @@ import (

 	"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
 	"github.com/fleetdm/fleet/v4/server/fleet"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"

@@ -32,7 +32,7 @@ type Schedule struct {
 	ctx        context.Context
 	name       string
 	instanceID string
-	logger     *logging.Logger
+	logger     *slog.Logger

 	defaultPrevRunCreatedAt time.Time // default timestamp of previous run for the schedule if none exists, time.Now if not set

@@ -96,7 +96,7 @@ type CronStatsStore interface {
 type Option func(*Schedule)

 // WithLogger sets a logger for the Schedule.
-func WithLogger(l *logging.Logger) Option {
+func WithLogger(l *slog.Logger) Option {
 	return func(s *Schedule) {
 		s.logger = l.With("schedule", s.name)
 	}

@@ -182,7 +182,7 @@ func New(
 		ctx:        ctx,
 		name:       name,
 		instanceID: instanceID,
-		logger:     logging.NewNopLogger(),
+		logger:     slog.New(slog.DiscardHandler),
 		trigger:    make(chan int),
 		done:       make(chan struct{}),
 		configReloadInterval: 1 * time.Hour, // by default we will check for updated config once per hour

@@ -194,7 +194,7 @@ func New(
 		fn(sch)
 	}
 	if sch.logger == nil {
-		sch.logger = logging.NewNopLogger()
+		sch.logger = slog.New(slog.DiscardHandler)
 	}
 	sch.logger = sch.logger.With("instanceID", instanceID)
 	sch.errors = make(fleet.CronScheduleErrors)
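
With WithLogger accepting a *slog.Logger and the default falling back to slog.DiscardHandler, wiring up a schedule looks roughly like this (a sketch against the signature visible in this diff; locker and statsStore stand in for the two datastore arguments):

	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	s, err := schedule.New(ctx, "example_cron", instanceID, time.Hour, locker, statsStore,
		schedule.WithLogger(logger),
		schedule.WithJob("example_job", func(ctx context.Context) error { return nil }),
	)

Note the attribute layering: WithLogger attaches "schedule" and New later appends "instanceID", so every line the schedule logs is already scoped.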