Changes needed before gokit/log to slog transition. (#39527)

<!-- Add the related story/sub-task/bug number, like Resolves #123, or
remove if NA -->
**Related issue:** Resolves #38889

PLEASE READ BELOW before looking at file changes

Before converting individual files/packages to slog, we generally need
to make these two changes to make the conversion easier:
- Replace uses of `kitlog.With` since they are not fully compatible with
our kitlog adapter
- Directly use the kitlog adapter logger type instead of the kitlog
interface, which will let us have direct access to the underlying slog
logger: `*logging.Logger`

Note that I did not replace absolutely all uses of `kitlog.Logger`, but
I did remove all uses of `kitlog.With` except for the following, due to
complexity:
- server/logging/filesystem.go and the other log writers (webhook,
firehose, kinesis, lambda, pubsub, nats)
- server/datastore/mysql/nanomdm_storage.go (adapter pattern)
- server/vulnerabilities/nvd/* (cascades to CLI tools)
- server/service/osquery_utils/queries.go (callback type signatures
cascade broadly)
- cmd/maintained-apps/ (standalone, so can be transitioned later all at
once)

Most of the changes in this PR follow these patterns:
- `kitlog.Logger` type → `*logging.Logger`
- `kitlog.With(logger, ...)` → `logger.With(...)`
- `kitlog.NewNopLogger()` → `logging.NewNopLogger()`, including similar
variations such as `logging.NewLogfmtLogger(w)` and
`logging.NewJSONLogger(w)`
- removed many now-unused kitlog imports

Unique changes that the PR review should focus on:
- server/platform/logging/kitlog_adapter.go: Core adapter changes
- server/platform/logging/logging.go: New convenience functions
- server/service/integration_logger_test.go: Test changes for slog

# Checklist for submitter

If some of the following don't apply, delete the relevant line.

- [x] Changes file added for user-visible changes in `changes/`,
`orbit/changes/` or `ee/fleetd-chrome/changes`.
  - Was added in previous PR

## Testing

- [x] Added/updated automated tests
- [x] QA'd all new/changed functionality manually


<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

* **Refactor**
* Migrated the codebase to a unified internal structured logging system
for more consistent, reliable logs and observability.
* No user-facing functionality changed; runtime behavior and APIs remain
compatible.
* **Tests**
* Updated tests to use the new logging helpers to ensure consistent test
logging and validation.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
This commit is contained in:
Victor Lyuboslavsky 2026-02-11 10:08:33 -06:00 committed by GitHub
parent 37e7e84f3c
commit aaac4b1dfe
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
78 changed files with 603 additions and 572 deletions

View file

@ -78,6 +78,7 @@ linters:
exclude-functions:
# Logging
- "(github.com/go-kit/log.Logger).Log"
- "(*github.com/fleetdm/fleet/v4/server/platform/logging.Logger).Log"
# fmt package
- fmt.Fprint
- fmt.Fprintf

View file

@ -30,6 +30,7 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/assets"
maintained_apps "github.com/fleetdm/fleet/v4/server/mdm/maintainedapps"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/policies"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/externalsvc"
@ -43,11 +44,10 @@ import (
"github.com/fleetdm/fleet/v4/server/vulnerabilities/utils"
"github.com/fleetdm/fleet/v4/server/webhooks"
"github.com/fleetdm/fleet/v4/server/worker"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
)
func errHandler(ctx context.Context, logger kitlog.Logger, msg string, err error) {
func errHandler(ctx context.Context, logger *logging.Logger, msg string, err error) {
level.Error(logger).Log("msg", msg, "err", err)
ctxerr.Handle(ctx, err)
}
@ -56,12 +56,12 @@ func newVulnerabilitiesSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
config *config.VulnerabilitiesConfig,
) (*schedule.Schedule, error) {
const name = string(fleet.CronVulnerabilities)
interval := config.Periodicity
vulnerabilitiesLogger := kitlog.With(logger, "cron", name)
vulnerabilitiesLogger := logger.With("cron", name)
var options []schedule.Option
@ -80,7 +80,7 @@ func newVulnerabilitiesSchedule(
func cronVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
config *config.VulnerabilitiesConfig,
) error {
if config == nil {
@ -115,7 +115,7 @@ func cronVulnerabilities(
return nil
}
func updateVulnHostCounts(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger, maxConcurrency int) error {
func updateVulnHostCounts(ctx context.Context, ds fleet.Datastore, logger *logging.Logger, maxConcurrency int) error {
// Prevent invalid values for max concurrency
if maxConcurrency <= 0 {
level.Info(logger).Log("msg", "invalid maxConcurrency value provided, setting value to 1", "providedValue", maxConcurrency)
@ -136,7 +136,7 @@ func updateVulnHostCounts(ctx context.Context, ds fleet.Datastore, logger kitlog
func scanVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
config *config.VulnerabilitiesConfig,
appConfig *fleet.AppConfig,
vulnPath string,
@ -231,7 +231,7 @@ func scanVulnerabilities(
if err := webhooks.TriggerVulnerabilitiesWebhook(
ctx,
ds,
kitlog.With(logger, "webhook", "vulnerabilities"),
logger.With("webhook", "vulnerabilities"),
args,
mapper,
); err != nil {
@ -243,7 +243,7 @@ func scanVulnerabilities(
if err := worker.QueueJiraVulnJobs(
ctx,
ds,
kitlog.With(logger, "jira", "vulnerabilities"),
logger.With("jira", "vulnerabilities"),
recentV,
matchingMeta,
); err != nil {
@ -255,7 +255,7 @@ func scanVulnerabilities(
if err := worker.QueueZendeskVulnJobs(
ctx,
ds,
kitlog.With(logger, "zendesk", "vulnerabilities"),
logger.With("zendesk", "vulnerabilities"),
recentV,
matchingMeta,
); err != nil {
@ -274,7 +274,7 @@ func scanVulnerabilities(
func checkCustomVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
collectVulns bool,
startTime time.Time,
) []fleet.SoftwareVulnerability {
@ -295,7 +295,7 @@ func checkCustomVulnerabilities(
func checkWinVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
vulnPath string,
config *config.VulnerabilitiesConfig,
collectVulns bool,
@ -336,7 +336,7 @@ func checkWinVulnerabilities(
"found new", len(r))
results = append(results, r...)
if err != nil {
errHandler(ctx, kitlog.With(logger, "os name", o.Name, "display version", o.DisplayVersion), "analyzing hosts for Windows vulnerabilities", err)
errHandler(ctx, logger.With("os name", o.Name, "display version", o.DisplayVersion), "analyzing hosts for Windows vulnerabilities", err)
}
}
}
@ -347,7 +347,7 @@ func checkWinVulnerabilities(
func checkOvalVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
vulnPath string,
config *config.VulnerabilitiesConfig,
collectVulns bool,
@ -399,7 +399,7 @@ func checkOvalVulnerabilities(
func checkGovalDictionaryVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
vulnPath string,
config *config.VulnerabilitiesConfig,
collectVulns bool,
@ -450,7 +450,7 @@ func checkGovalDictionaryVulnerabilities(
func checkNVDVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
vulnPath string,
config *config.VulnerabilitiesConfig,
collectVulns bool,
@ -494,7 +494,7 @@ func checkNVDVulnerabilities(
func checkMacOfficeVulnerabilities(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
vulnPath string,
config *config.VulnerabilitiesConfig,
collectVulns bool,
@ -528,7 +528,7 @@ func newAutomationsSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
intervalReload time.Duration,
failingPoliciesSet fleet.FailingPolicySet,
) (*schedule.Schedule, error) {
@ -543,7 +543,7 @@ func newAutomationsSchedule(
s := schedule.New(
// TODO(sarah): Reconfigure settings so automations interval doesn't reside under webhook settings
ctx, name, instanceID, appConfig.WebhookSettings.Interval.ValueOr(defaultInterval), ds, ds,
schedule.WithLogger(kitlog.With(logger, "cron", name)),
schedule.WithLogger(logger.With("cron", name)),
schedule.WithConfigReloadInterval(intervalReload, func(ctx context.Context) (time.Duration, error) {
appConfig, err := ds.AppConfig(ctx)
if err != nil {
@ -556,20 +556,20 @@ func newAutomationsSchedule(
"host_status_webhook",
func(ctx context.Context) error {
return webhooks.TriggerHostStatusWebhook(
ctx, ds, kitlog.With(logger, "automation", "host_status"),
ctx, ds, logger.With("automation", "host_status"),
)
},
),
schedule.WithJob(
"fire_outdated_automations",
func(ctx context.Context) error {
return scheduleFailingPoliciesAutomation(ctx, ds, kitlog.With(logger, "automation", "fire_outdated_automations"), failingPoliciesSet)
return scheduleFailingPoliciesAutomation(ctx, ds, logger.With("automation", "fire_outdated_automations"), failingPoliciesSet)
},
),
schedule.WithJob(
"failing_policies_automation",
func(ctx context.Context) error {
return triggerFailingPoliciesAutomation(ctx, ds, kitlog.With(logger, "automation", "failing_policies"), failingPoliciesSet)
return triggerFailingPoliciesAutomation(ctx, ds, logger.With("automation", "failing_policies"), failingPoliciesSet)
},
),
)
@ -580,7 +580,7 @@ func newAutomationsSchedule(
func scheduleFailingPoliciesAutomation(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
failingPoliciesSet fleet.FailingPolicySet,
) error {
for {
@ -604,7 +604,7 @@ func scheduleFailingPoliciesAutomation(
func triggerFailingPoliciesAutomation(
ctx context.Context,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
failingPoliciesSet fleet.FailingPolicySet,
) error {
appConfig, err := ds.AppConfig(ctx)
@ -659,7 +659,7 @@ func newWorkerIntegrationsSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
depStorage *mysql.NanoDEPStorage,
commander *apple_mdm.MDMAppleCommander,
bootstrapPackageStore fleet.MDMBootstrapPackageStore,
@ -677,7 +677,7 @@ func newWorkerIntegrationsSchedule(
maxRunTime = 10 * time.Minute // allow the worker to run for 10 minutes
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
// create the worker and register the Jira and Zendesk jobs even if no
// integration is enabled, as that config can change live (and if it's not
@ -839,7 +839,7 @@ func newCleanupsAndAggregationSchedule(
instanceID string,
ds fleet.Datastore,
svc fleet.Service,
logger kitlog.Logger,
logger *logging.Logger,
enrollHostLimiter fleet.EnrollHostLimiter,
config *config.FleetConfig,
commander *apple_mdm.MDMAppleCommander,
@ -856,7 +856,7 @@ func newCleanupsAndAggregationSchedule(
ctx, name, instanceID, defaultInterval, ds, ds,
// Using leader for the lock to be backwards compatilibity with old deployments.
schedule.WithAltLockID("leader"),
schedule.WithLogger(kitlog.With(logger, "cron", name)),
schedule.WithLogger(logger.With("cron", name)),
// Run cleanup jobs first.
schedule.WithJob(
"distributed_query_campaigns",
@ -1096,7 +1096,7 @@ func newFrequentCleanupsSchedule(
instanceID string,
ds fleet.Datastore,
lq fleet.LiveQueryStore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronFrequentCleanups)
@ -1106,7 +1106,7 @@ func newFrequentCleanupsSchedule(
ctx, name, instanceID, defaultInterval, ds, ds,
// Using leader for the lock to be backwards compatilibity with old deployments.
schedule.WithAltLockID("leader_frequent_cleanups"),
schedule.WithLogger(kitlog.With(logger, "cron", name)),
schedule.WithLogger(logger.With("cron", name)),
// Run cleanup jobs first.
schedule.WithJob("redis_live_queries", func(ctx context.Context) error {
// It's necessary to avoid lingering live queries in case of:
@ -1135,7 +1135,7 @@ func newQueryResultsCleanupSchedule(
instanceID string,
ds fleet.Datastore,
liveQueryStore fleet.LiveQueryStore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronQueryResultsCleanup)
@ -1143,7 +1143,7 @@ func newQueryResultsCleanupSchedule(
)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(kitlog.With(logger, "cron", name)),
schedule.WithLogger(logger.With("cron", name)),
schedule.WithJob("cleanup_excess_query_results", func(ctx context.Context) error {
appConfig, err := ds.AppConfig(ctx)
if err != nil {
@ -1169,7 +1169,7 @@ func newQueryResultsCleanupSchedule(
func verifyDiskEncryptionKeys(
ctx context.Context,
logger kitlog.Logger,
logger *logging.Logger,
ds fleet.Datastore,
) error {
appCfg, err := ds.AppConfig(ctx)
@ -1221,14 +1221,14 @@ func verifyDiskEncryptionKeys(
return nil
}
func newUsageStatisticsSchedule(ctx context.Context, instanceID string, ds fleet.Datastore, config config.FleetConfig, logger kitlog.Logger) (*schedule.Schedule, error) {
func newUsageStatisticsSchedule(ctx context.Context, instanceID string, ds fleet.Datastore, config config.FleetConfig, logger *logging.Logger) (*schedule.Schedule, error) {
const (
name = string(fleet.CronUsageStatistics)
defaultInterval = 1 * time.Hour
)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(kitlog.With(logger, "cron", name)),
schedule.WithLogger(logger.With("cron", name)),
schedule.WithJob(
"try_send_statistics",
func(ctx context.Context) error {
@ -1281,10 +1281,10 @@ func newAppleMDMDEPProfileAssigner(
periodicity time.Duration,
ds fleet.Datastore,
depStorage *mysql.NanoDEPStorage,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const name = string(fleet.CronAppleMDMDEPProfileAssigner)
logger = kitlog.With(logger, "cron", name, "component", "nanodep-syncer")
logger = logger.With("cron", name, "component", "nanodep-syncer")
s := schedule.New(
ctx, name, instanceID, periodicity, ds, ds,
schedule.WithLogger(logger),
@ -1297,7 +1297,7 @@ func newAppleMDMDEPProfileAssigner(
func appleMDMDEPSyncerJob(
ds fleet.Datastore,
depStorage *mysql.NanoDEPStorage,
logger kitlog.Logger,
logger *logging.Logger,
) func(context.Context) error {
var fleetSyncer *apple_mdm.DEPService
return func(ctx context.Context) error {
@ -1343,7 +1343,7 @@ func newAppleMDMProfileManagerSchedule(
instanceID string,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronMDMAppleProfileManager)
@ -1353,7 +1353,7 @@ func newAppleMDMProfileManagerSchedule(
defaultInterval = 30 * time.Second
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1372,7 +1372,7 @@ func newWindowsMDMProfileManagerSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronMDMWindowsProfileManager)
@ -1382,7 +1382,7 @@ func newWindowsMDMProfileManagerSchedule(
defaultInterval = 30 * time.Second
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1398,7 +1398,7 @@ func newAndroidMDMProfileManagerSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
licenseKey string,
androidAgentConfig config.AndroidAgentConfig,
) (*schedule.Schedule, error) {
@ -1407,7 +1407,7 @@ func newAndroidMDMProfileManagerSchedule(
defaultInterval = 30 * time.Second
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1424,12 +1424,12 @@ func newMDMAppleServiceDiscoverySchedule(
instanceID string,
ds fleet.Datastore,
depStorage *mysql.NanoDEPStorage,
logger kitlog.Logger,
logger *logging.Logger,
urlPrefix string,
) (*schedule.Schedule, error) {
const name = "mdm_service_discovery"
interval := 1 * time.Hour
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, interval, ds, ds,
schedule.WithLogger(logger),
@ -1445,7 +1445,7 @@ func newMDMAPNsPusher(
instanceID string,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const name = string(fleet.CronAppleMDMAPNsPusher)
@ -1459,7 +1459,7 @@ func newMDMAPNsPusher(
}
}
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, interval, ds, ds,
schedule.WithLogger(logger),
@ -1480,7 +1480,7 @@ func newMDMAPNsPusher(
return s, nil
}
func cleanupCronStatsOnShutdown(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger, instanceID string) {
func cleanupCronStatsOnShutdown(ctx context.Context, ds fleet.Datastore, logger *logging.Logger, instanceID string) {
if err := ds.UpdateAllCronStatsForInstance(ctx, instanceID, fleet.CronStatsStatusPending, fleet.CronStatsStatusCanceled); err != nil {
logger.Log("err", "cancel pending cron stats for instance", "details", err)
}
@ -1491,14 +1491,14 @@ func newActivitiesStreamingSchedule(
instanceID string,
activitySvc activity_api.Service,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
auditLogger activity_api.JSONLogger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronActivitiesStreaming)
interval = 5 * time.Minute
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, interval, ds, ds,
schedule.WithLogger(logger),
@ -1518,13 +1518,13 @@ func newHostVitalsLabelMembershipSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronHostVitalsLabelMembership)
interval = 5 * time.Minute
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, interval, ds, ds,
schedule.WithLogger(logger),
@ -1569,13 +1569,13 @@ func newBatchActivityCompletionCheckerSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronBatchActivityCompletionChecker)
interval = 5 * time.Minute
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, interval, ds, ds,
schedule.WithLogger(logger),
@ -1600,7 +1600,7 @@ func cronBatchActivityCompletionChecker(
return nil
}
func stringSliceToUintSlice(s []string, logger kitlog.Logger) []uint {
func stringSliceToUintSlice(s []string, logger *logging.Logger) []uint {
result := make([]uint, 0, len(s))
for _, v := range s {
i, err := strconv.ParseUint(v, 10, 64)
@ -1626,11 +1626,11 @@ func newIPhoneIPadRefetcher(
periodicity time.Duration,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *logging.Logger,
newActivityFn apple_mdm.NewActivityFunc,
) (*schedule.Schedule, error) {
const name = string(fleet.CronAppleMDMIPhoneIPadRefetcher)
logger = kitlog.With(logger, "cron", name, "component", "iphone-ipad-refetcher")
logger = logger.With("cron", name, "component", "iphone-ipad-refetcher")
s := schedule.New(
ctx, name, instanceID, periodicity, ds, ds,
schedule.WithLogger(logger),
@ -1649,13 +1649,13 @@ func cronUninstallSoftwareMigration(
instanceID string,
ds fleet.Datastore,
softwareInstallStore fleet.SoftwareInstallerStore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronUninstallSoftwareMigration)
defaultInterval = 24 * time.Hour
)
logger = kitlog.With(logger, "cron", name, "component", name)
logger = logger.With("cron", name, "component", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1674,14 +1674,14 @@ func cronUpgradeCodeSoftwareMigration(
instanceID string,
ds fleet.Datastore,
softwareInstallStore fleet.SoftwareInstallerStore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronUpgradeCodeSoftwareMigration)
defaultInterval = 24 * time.Hour
priorJobDiff = -(defaultInterval - 30*time.Second)
)
logger = kitlog.With(logger, "cron", name, "component", name)
logger = logger.With("cron", name, "component", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1699,7 +1699,7 @@ func newMaintainedAppSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronMaintainedApps)
@ -1707,7 +1707,7 @@ func newMaintainedAppSchedule(
priorJobDiff = -(defaultInterval - 30*time.Second)
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1725,7 +1725,7 @@ func newRefreshVPPAppVersionsSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
vppAppsConfig apple_apps.Config,
) (*schedule.Schedule, error) {
const (
@ -1733,7 +1733,7 @@ func newRefreshVPPAppVersionsSchedule(
defaultInterval = 1 * time.Hour
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1754,10 +1754,10 @@ func newIPhoneIPadReviver(
instanceID string,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const name = string(fleet.CronAppleMDMIPhoneIPadReviver)
logger = kitlog.With(logger, "cron", name, "component", "iphone-ipad-reviver")
logger = logger.With("cron", name, "component", "iphone-ipad-reviver")
s := schedule.New(
ctx, name, instanceID, 1*time.Hour, ds, ds,
schedule.WithLogger(logger),
@ -1773,7 +1773,7 @@ func newUpcomingActivitiesSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronUpcomingActivitiesMaintenance)
@ -1781,7 +1781,7 @@ func newUpcomingActivitiesSchedule(
)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(kitlog.With(logger, "cron", name)),
schedule.WithLogger(logger.With("cron", name)),
schedule.WithJob("unblock_hosts_upcoming_activity_queue", func(ctx context.Context) error {
const maxUnblockHosts = 500
_, err := ds.UnblockHostsUpcomingActivityQueue(ctx, maxUnblockHosts)
@ -1796,14 +1796,14 @@ func newBatchActivitiesSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronScheduledBatchActivities)
defaultInterval = 2 * time.Minute
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
w := worker.NewWorker(ds, logger)
@ -1835,7 +1835,7 @@ func newAndroidMDMDeviceReconcilerSchedule(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
licenseKey string,
) (*schedule.Schedule, error) {
const (
@ -1843,7 +1843,7 @@ func newAndroidMDMDeviceReconcilerSchedule(
defaultInterval = 1 * time.Hour
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1859,7 +1859,7 @@ func cronEnableAndroidAppReportsOnDefaultPolicy(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
androidSvc android.Service,
) (*schedule.Schedule, error) {
const (
@ -1867,7 +1867,7 @@ func cronEnableAndroidAppReportsOnDefaultPolicy(
defaultInterval = 24 * time.Hour
priorJobDiff = -(defaultInterval - 45*time.Second)
)
logger = kitlog.With(logger, "cron", name, "component", name)
logger = logger.With("cron", name, "component", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),
@ -1885,7 +1885,7 @@ func cronMigrateToPerHostPolicy(
ctx context.Context,
instanceID string,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
androidSvc android.Service,
) (*schedule.Schedule, error) {
const (
@ -1893,7 +1893,7 @@ func cronMigrateToPerHostPolicy(
defaultInterval = 24 * time.Hour
priorJobDiff = -(defaultInterval - 30*time.Second)
)
logger = kitlog.With(logger, "cron", name, "component", name)
logger = logger.With("cron", name, "component", name)
s := schedule.New(
ctx, name, instanceID, defaultInterval, ds, ds,
schedule.WithLogger(logger),

View file

@ -18,9 +18,8 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/mock"
mdmmock "github.com/fleetdm/fleet/v4/server/mock/mdm"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/go-kit/log"
kitlog "github.com/go-kit/log"
)
func TestNewAppleMDMProfileManagerWithoutConfig(t *testing.T) {
@ -28,7 +27,7 @@ func TestNewAppleMDMProfileManagerWithoutConfig(t *testing.T) {
mdmStorage := &mdmmock.MDMAppleStore{}
ds := new(mock.Store)
cmdr := apple_mdm.NewMDMAppleCommander(mdmStorage, nil)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
sch, err := newAppleMDMProfileManagerSchedule(ctx, "foo", ds, cmdr, logger)
require.NotNil(t, sch)
@ -38,7 +37,7 @@ func TestNewAppleMDMProfileManagerWithoutConfig(t *testing.T) {
func TestNewWindowsMDMProfileManagerWithoutConfig(t *testing.T) {
ctx := context.Background()
ds := new(mock.Store)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
sch, err := newWindowsMDMProfileManagerSchedule(ctx, "foo", ds, logger)
require.NotNil(t, sch)
@ -89,7 +88,7 @@ func TestMigrateABMTokenDuringDEPCronJob(t *testing.T) {
err = depStorage.StoreConfig(ctx, apple_mdm.UnsavedABMTokenOrgName, &nanodep_client.Config{BaseURL: srv.URL})
require.NoError(t, err)
logger := log.NewNopLogger()
logger := logging.NewNopLogger()
syncFn := appleMDMDEPSyncerJob(ds, depStorage, logger)
err = syncFn(ctx)
require.NoError(t, err)

View file

@ -11,7 +11,6 @@ import (
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/shellquote"
kitlog "github.com/go-kit/log"
_ "github.com/go-sql-driver/mysql"
"github.com/spf13/cobra"
otelsdklog "go.opentelemetry.io/otel/sdk/log"
@ -133,8 +132,10 @@ func applyDevFlags(cfg *config.FleetConfig) {
}
}
// initLogger creates a kitlog.Logger backed by slog.
func initLogger(cfg config.FleetConfig, loggerProvider *otelsdklog.LoggerProvider) kitlog.Logger {
// initLogger creates a *Logger backed by slog.
// Returning the concrete type allows callers to access the underlying
// slog.Logger via SlogLogger() when needed for migrated packages.
func initLogger(cfg config.FleetConfig, loggerProvider *otelsdklog.LoggerProvider) *logging.Logger {
slogLogger := logging.NewSlogLogger(logging.Options{
JSON: cfg.Logging.JSON,
Debug: cfg.Logging.Debug,
@ -142,5 +143,5 @@ func initLogger(cfg config.FleetConfig, loggerProvider *otelsdklog.LoggerProvide
OtelLogsEnabled: cfg.Logging.OtelLogsEnabled,
LoggerProvider: loggerProvider,
})
return logging.NewKitlogAdapter(slogLogger)
return logging.NewLogger(slogLogger)
}

View file

@ -303,7 +303,7 @@ the way that the Fleet server works.
// NOTE this will disable OTEL/APM interceptor
if dev_mode.Env("FLEET_DEV_ENABLE_SQL_INTERCEPTOR") != "" {
opts = append(opts, mysql.WithInterceptor(&devSQLInterceptor{
logger: kitlog.With(logger, "component", "sql-interceptor"),
logger: logger.With("component", "sql-interceptor"),
}))
}
@ -421,7 +421,7 @@ the way that the Fleet server works.
ds = redisWrapperDS
resultStore := pubsub.NewRedisQueryResults(redisPool, config.Redis.DuplicateResults,
log.With(logger, "component", "query-results"),
logger.With("component", "query-results"),
)
liveQueryStore := live_query.NewRedisLiveQuery(redisPool, logger, liveQueryMemCacheDuration)
ssoSessionStore := sso.NewSessionStore(redisPool)
@ -585,7 +585,7 @@ the way that the Fleet server works.
}
var mdmPushService push.Pusher
nanoMDMLogger := service.NewNanoMDMLogger(kitlog.With(logger, "component", "apple-mdm-push"))
nanoMDMLogger := service.NewNanoMDMLogger(logger.With("component", "apple-mdm-push"))
pushProviderFactory := buford.NewPushProviderFactory(buford.WithNewClient(func(cert *tls.Certificate) (*http.Client, error) {
return fleethttp.NewClient(fleethttp.WithTLSClientConfig(&tls.Config{
Certificates: []tls.Certificate{*cert},
@ -1292,7 +1292,7 @@ the way that the Fleet server works.
level.Info(logger).Log("msg", fmt.Sprintf("started cron schedules: %s", strings.Join(cronSchedules.ScheduleNames(), ", ")))
// StartCollectors starts a goroutine per collector, using ctx to cancel.
task.StartCollectors(ctx, kitlog.With(logger, "cron", "async_task"))
task.StartCollectors(ctx, logger.With("cron", "async_task"))
// Flush seen hosts every second
hostsAsyncCfg := config.Osquery.AsyncConfigForTask(configpkg.AsyncTaskHostLastSeen)
@ -1325,7 +1325,7 @@ the way that the Fleet server works.
svc = service.NewMetricsService(svc, requestCount, requestLatency)
httpLogger := kitlog.With(logger, "component", "http")
httpLogger := logger.With("component", "http")
limiterStore := &redis.ThrottledStore{
Pool: redisPool,
@ -1334,7 +1334,7 @@ the way that the Fleet server works.
var httpSigVerifier func(http.Handler) http.Handler
if license.IsPremium() {
httpSigVerifier, err = httpsig.Middleware(ds, config.Auth.RequireHTTPMessageSignature, kitlog.With(logger, "component", "http-sig-verifier"))
httpSigVerifier, err = httpsig.Middleware(ds, config.Auth.RequireHTTPMessageSignature, logger.With("component", "http-sig-verifier"))
if err != nil {
initFatal(err, "initializing HTTP signature verifier")
}
@ -1482,7 +1482,7 @@ the way that the Fleet server works.
}
// Host identify and conditional access SCEP feature only works if a private key has been set up
if len(config.Server.PrivateKey) > 0 {
hostIdentitySCEPDepot, err := mds.NewHostIdentitySCEPDepot(kitlog.With(logger, "component", "host-id-scep-depot"), &config)
hostIdentitySCEPDepot, err := mds.NewHostIdentitySCEPDepot(logger.With("component", "host-id-scep-depot"), &config)
if err != nil {
initFatal(err, "setup host identity SCEP depot")
}
@ -1491,7 +1491,7 @@ the way that the Fleet server works.
}
// Conditional Access SCEP
condAccessSCEPDepot, err := mds.NewConditionalAccessSCEPDepot(kitlog.With(logger, "component", "conditional-access-scep-depot"), &config)
condAccessSCEPDepot, err := mds.NewConditionalAccessSCEPDepot(logger.With("component", "conditional-access-scep-depot"), &config)
if err != nil {
initFatal(err, "setup conditional access SCEP depot")
}

View file

@ -24,12 +24,10 @@ import (
apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/tokenpki"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/schedule"
"github.com/go-kit/log"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/jmoiron/sqlx"
"github.com/smallstep/pkcs7"
"github.com/stretchr/testify/assert"
@ -296,7 +294,7 @@ func TestAutomationsSchedule(t *testing.T) {
defer cancelFunc()
failingPoliciesSet := service.NewMemFailingPolicySet()
s, err := newAutomationsSchedule(ctx, "test_instance", ds, kitlog.NewNopLogger(), 5*time.Minute, failingPoliciesSet)
s, err := newAutomationsSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), 5*time.Minute, failingPoliciesSet)
require.NoError(t, err)
s.Start()
@ -355,7 +353,7 @@ func TestCronVulnerabilitiesCreatesDatabasesPath(t *testing.T) {
// Use schedule to test that the schedule does indeed call cronVulnerabilities.
ctx = license.NewContext(ctx, &fleet.LicenseInfo{Tier: fleet.TierPremium})
ctx, cancel := context.WithCancel(ctx)
lg := kitlog.NewJSONLogger(os.Stdout)
lg := logging.NewNopLogger()
go func() {
defer func() {
@ -416,8 +414,7 @@ func (f *softwareIterator) Close() error { return nil }
func TestScanVulnerabilities(t *testing.T) {
nettest.Run(t)
logger := kitlog.NewNopLogger()
logger = level.NewFilter(logger, level.AllowDebug())
logger := logging.NewNopLogger()
ctx := context.Background()
@ -600,8 +597,7 @@ func TestScanVulnerabilities(t *testing.T) {
}
func TestUpdateVulnHostCounts(t *testing.T) {
logger := kitlog.NewNopLogger()
logger = level.NewFilter(logger, level.AllowDebug())
logger := logging.NewNopLogger()
ctx := context.Background()
@ -645,8 +641,7 @@ func TestUpdateVulnHostCounts(t *testing.T) {
}
func TestScanVulnerabilitiesMkdirFailsIfVulnPathIsFile(t *testing.T) {
logger := kitlog.NewNopLogger()
logger = level.NewFilter(logger, level.AllowDebug())
logger := logging.NewNopLogger()
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
@ -722,7 +717,7 @@ func TestCronVulnerabilitiesSkipMkdirIfDisabled(t *testing.T) {
// Use schedule to test that the schedule does indeed call cronVulnerabilities.
ctx = license.NewContext(ctx, &fleet.LicenseInfo{Tier: fleet.TierPremium})
ctx, cancel := context.WithCancel(ctx)
s, err := newVulnerabilitiesSchedule(ctx, "test_instance", ds, kitlog.NewNopLogger(), &config)
s, err := newVulnerabilitiesSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), &config)
require.NoError(t, err)
s.Start()
t.Cleanup(func() {
@ -806,7 +801,7 @@ func TestAutomationsScheduleLockDuration(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
s, err := newAutomationsSchedule(ctx, "test_instance", ds, kitlog.NewNopLogger(), 1*time.Second, service.NewMemFailingPolicySet())
s, err := newAutomationsSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), 1*time.Second, service.NewMemFailingPolicySet())
require.NoError(t, err)
s.Start()
@ -872,7 +867,7 @@ func TestAutomationsScheduleIntervalChange(t *testing.T) {
ctx, cancelFunc := context.WithCancel(context.Background())
defer cancelFunc()
s, err := newAutomationsSchedule(ctx, "test_instance", ds, kitlog.NewNopLogger(), 200*time.Millisecond, service.NewMemFailingPolicySet())
s, err := newAutomationsSchedule(ctx, "test_instance", ds, logging.NewNopLogger(), 200*time.Millisecond, service.NewMemFailingPolicySet())
require.NoError(t, err)
s.Start()
@ -1019,7 +1014,7 @@ func TestDebugMux(t *testing.T) {
func TestVerifyDiskEncryptionKeysJob(t *testing.T) {
ds := new(mock.Store)
ctx := context.Background()
logger := log.NewNopLogger()
logger := logging.NewNopLogger()
testCert, testKey, err := apple_mdm.NewSCEPCACertKey()
require.NoError(t, err)

View file

@ -14,7 +14,7 @@ import (
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/fleet"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
"github.com/spf13/cobra"
)
@ -40,8 +40,7 @@ by an exit code of zero.`,
applyDevFlags(&cfg)
}
logger := initLogger(cfg, nil)
logger = kitlog.With(logger, fleet.CronVulnerabilities)
logger := initLogger(cfg, nil).With("cron", fleet.CronVulnerabilities)
licenseInfo, err := initLicense(&cfg, devLicense, devExpiredLicense)
if err != nil {
@ -136,7 +135,7 @@ by an exit code of zero.`,
return vulnProcessingCmd
}
func configureVulnPath(vulnConfig config.VulnerabilitiesConfig, appConfig *fleet.AppConfig, logger kitlog.Logger) (vulnPath string) {
func configureVulnPath(vulnConfig config.VulnerabilitiesConfig, appConfig *fleet.AppConfig, logger *logging.Logger) (vulnPath string) {
switch {
case vulnConfig.DatabasesPath != "" && appConfig != nil && appConfig.VulnerabilitySettings.DatabasesPath != "":
vulnPath = vulnConfig.DatabasesPath
@ -159,7 +158,7 @@ type NamedVulnFunc struct {
VulnFunc func(ctx context.Context) error
}
func getVulnFuncs(ds fleet.Datastore, logger kitlog.Logger, config *config.VulnerabilitiesConfig) []NamedVulnFunc {
func getVulnFuncs(ds fleet.Datastore, logger *logging.Logger, config *config.VulnerabilitiesConfig) []NamedVulnFunc {
vulnFuncs := []NamedVulnFunc{
{
Name: "cron_vulnerabilities",

View file

@ -10,10 +10,9 @@ import (
"github.com/fleetdm/fleet/v4/cmd/fleetctl/fleetctl/testing_utils"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/live_query/live_query_mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/pubsub"
"github.com/fleetdm/fleet/v4/server/service"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -22,8 +21,7 @@ func TestSavedLiveQuery(t *testing.T) {
rs := pubsub.NewInmemQueryResults()
lq := live_query_mock.New(t)
logger := kitlog.NewJSONLogger(os.Stdout)
logger = level.NewFilter(logger, level.AllowDebug())
logger := logging.NewJSONLogger(os.Stdout)
_, ds := testing_utils.RunServerWithMockedDS(t, &service.TestServerOpts{
Rs: rs,
@ -196,8 +194,7 @@ func TestAdHocLiveQuery(t *testing.T) {
rs := pubsub.NewInmemQueryResults()
lq := live_query_mock.New(t)
logger := kitlog.NewJSONLogger(os.Stdout)
logger = level.NewFilter(logger, level.AllowDebug())
logger := logging.NewJSONLogger(os.Stdout)
_, ds := testing_utils.RunServerWithMockedDS(
t, &service.TestServerOpts{

View file

@ -11,9 +11,9 @@ import (
"github.com/fleetdm/fleet/v4/cmd/fleetctl/fleetctl/testing_utils"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/schedule"
kitlog "github.com/go-kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -54,7 +54,7 @@ func TestTrigger(t *testing.T) {
os.Stdout = w
_, _ = testing_utils.RunServerWithMockedDS(t, &service.TestServerOpts{
Logger: kitlog.NewNopLogger(),
Logger: logging.NewNopLogger(),
StartCronSchedules: []service.TestNewScheduleFunc{
func(ctx context.Context, ds fleet.Datastore) fleet.NewCronScheduleFunc {
return func() (fleet.CronSchedule, error) {

View file

@ -15,7 +15,7 @@ import (
"github.com/cenkalti/backoff/v4"
"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/fleet"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
"github.com/google/uuid"
"golang.org/x/oauth2/google"
@ -53,7 +53,7 @@ var (
type GoogleCalendarConfig struct {
Context context.Context
IntegrationConfig *fleet.GoogleCalendarIntegration
Logger kitlog.Logger
Logger *logging.Logger
ServerURL string
// Should be nil for production
API GoogleCalendarAPI
@ -108,7 +108,7 @@ type eventDetails struct {
type GoogleCalendarLowLevelAPI struct {
service *calendar.Service
logger kitlog.Logger
logger *logging.Logger
serverURL string
}

View file

@ -9,7 +9,7 @@ import (
"github.com/fleetdm/fleet/v4/ee/server/calendar/load_test"
"github.com/fleetdm/fleet/v4/server/fleet"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
@ -62,7 +62,7 @@ func (s *googleCalendarIntegrationTestSuite) TestCreateGetDeleteEvent() {
"private_key": s.server.URL,
}},
},
Logger: kitlog.NewLogfmtLogger(kitlog.NewSyncWriter(os.Stdout)),
Logger: logging.NewLogfmtLogger(os.Stdout),
}
gCal := NewGoogleCalendar(config)
err := gCal.Configure(userEmail)
@ -129,7 +129,7 @@ func (s *googleCalendarIntegrationTestSuite) TestFillUpCalendar() {
"private_key": s.server.URL,
}},
},
Logger: kitlog.NewLogfmtLogger(kitlog.NewSyncWriter(os.Stdout)),
Logger: logging.NewLogfmtLogger(os.Stdout),
}
gCal := NewGoogleCalendar(config)
err := gCal.Configure(userEmail)

View file

@ -7,7 +7,7 @@ import (
"errors"
"fmt"
"github.com/fleetdm/fleet/v4/pkg/fleethttp"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"google.golang.org/api/calendar/v3"
"google.golang.org/api/googleapi"
"io"
@ -18,7 +18,7 @@ import (
// GoogleCalendarLoadAPI is used for load testing.
type GoogleCalendarLoadAPI struct {
Logger kitlog.Logger
Logger *logging.Logger
baseUrl string
userToImpersonate string
ctx context.Context
@ -30,7 +30,7 @@ type GoogleCalendarLoadAPI struct {
func (lowLevelAPI *GoogleCalendarLoadAPI) Configure(ctx context.Context, _ string, privateKey string, userToImpersonate string,
serverURL string) error {
if lowLevelAPI.Logger == nil {
lowLevelAPI.Logger = kitlog.With(kitlog.NewLogfmtLogger(os.Stderr), "mock", "GoogleCalendarLoadAPI", "user", userToImpersonate)
lowLevelAPI.Logger = logging.NewLogfmtLogger(os.Stderr).With("mock", "GoogleCalendarLoadAPI", "user", userToImpersonate)
}
lowLevelAPI.baseUrl = privateKey
lowLevelAPI.userToImpersonate = userToImpersonate

View file

@ -10,13 +10,13 @@ import (
"sync"
"time"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"google.golang.org/api/calendar/v3"
"google.golang.org/api/googleapi"
)
type GoogleCalendarMockAPI struct {
logger kitlog.Logger
logger *logging.Logger
}
type channel struct {
@ -36,7 +36,7 @@ const latency = 200 * time.Millisecond
// Configure creates a new Google Calendar service using the provided credentials.
func (lowLevelAPI *GoogleCalendarMockAPI) Configure(_ context.Context, _ string, _ string, userToImpersonate string, _ string) error {
if lowLevelAPI.logger == nil {
lowLevelAPI.logger = kitlog.With(kitlog.NewLogfmtLogger(os.Stderr), "mock", "GoogleCalendarMockAPI", "user", userToImpersonate)
lowLevelAPI.logger = logging.NewLogfmtLogger(os.Stderr).With("mock", "GoogleCalendarMockAPI", "user", userToImpersonate)
}
return nil
}

View file

@ -10,7 +10,7 @@ import (
"time"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/calendar/v3"
@ -26,7 +26,7 @@ const (
var (
baseCtx = context.Background()
logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
logger = logging.NewLogfmtLogger(os.Stdout)
)
type MockGoogleCalendarLowLevelAPI struct {

View file

@ -7,10 +7,9 @@ import (
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/integrationtest"
"github.com/go-kit/kit/log"
kitlog "github.com/go-kit/log"
"github.com/stretchr/testify/require"
)
@ -37,8 +36,8 @@ func SetUpSuiteWithConfig(t *testing.T, uniqueTestName string, configModifier fu
configModifier(&fleetCfg)
}
logger := log.NewLogfmtLogger(os.Stdout)
condAccessSCEPDepot, err := ds.NewConditionalAccessSCEPDepot(kitlog.With(logger, "component", "conditional-access-scep-depot"), &fleetCfg)
logger := logging.NewLogfmtLogger(os.Stdout)
condAccessSCEPDepot, err := ds.NewConditionalAccessSCEPDepot(logger.With("component", "conditional-access-scep-depot"), &fleetCfg)
require.NoError(t, err)
users, server := service.RunServerForTestsWithServiceWithDS(t, ctx, ds, fleetSvc, &service.TestServerOpts{

View file

@ -11,10 +11,9 @@ import (
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/integrationtest"
"github.com/go-kit/kit/log"
kitlog "github.com/go-kit/log"
"github.com/stretchr/testify/require"
)
@ -57,8 +56,8 @@ func SetUpSuiteWithConfig(t *testing.T, uniqueTestName string, requireSignature
configModifier(&fleetCfg)
}
logger := log.NewLogfmtLogger(os.Stdout)
hostIdentitySCEPDepot, err := ds.NewHostIdentitySCEPDepot(kitlog.With(logger, "component", "host-id-scep-depot"), &fleetCfg)
logger := logging.NewLogfmtLogger(os.Stdout)
hostIdentitySCEPDepot, err := ds.NewHostIdentitySCEPDepot(logger.With("component", "host-id-scep-depot"), &fleetCfg)
require.NoError(t, err)
users, server := service.RunServerForTestsWithServiceWithDS(t, ctx, ds, fleetSvc, &service.TestServerOpts{
License: license,

View file

@ -5,9 +5,9 @@ import (
"testing"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/integrationtest"
"github.com/go-kit/log"
)
type Suite struct {
@ -22,7 +22,7 @@ func SetUpSuite(t *testing.T, uniqueTestName string) *Suite {
ds, fleetCfg, fleetSvc, ctx := integrationtest.SetUpMySQLAndService(t, uniqueTestName, &service.TestServerOpts{
License: license,
})
logger := log.NewLogfmtLogger(os.Stdout)
logger := logging.NewLogfmtLogger(os.Stdout)
users, server := service.RunServerForTestsWithServiceWithDS(t, ctx, ds, fleetSvc, &service.TestServerOpts{
License: license,
FleetConfig: &fleetCfg,

View file

@ -16,6 +16,7 @@ import (
"github.com/fleetdm/fleet/v4/server/authz"
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service/middleware/auth"
"github.com/fleetdm/fleet/v4/server/service/middleware/log"
kitlog "github.com/go-kit/log"
@ -31,7 +32,7 @@ func RegisterSCIM(
mux *http.ServeMux,
ds fleet.Datastore,
svc fleet.Service,
logger kitlog.Logger,
logger *logging.Logger,
fleetConfig *config.FleetConfig,
) error {
if fleetConfig == nil {
@ -158,7 +159,7 @@ func RegisterSCIM(
},
}
scimLogger := kitlog.With(logger, "component", "SCIM")
scimLogger := logger.With("component", "SCIM")
resourceTypes := []scim.ResourceType{
{
ID: optional.NewString("User"),

View file

@ -9,9 +9,9 @@ import (
authz_ctx "github.com/fleetdm/fleet/v4/server/contexts/authz"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/calendar"
"github.com/go-kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -389,7 +389,7 @@ func TestCalendarWebhookErrorCases(t *testing.T) {
ds: ds,
distributedLock: lock,
authz: auth,
logger: log.NewNopLogger(),
logger: logging.NewNopLogger(),
}
// Apply test-specific mocks

View file

@ -18,9 +18,9 @@ import (
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
scep_mock "github.com/fleetdm/fleet/v4/server/mock/scep"
"github.com/fleetdm/fleet/v4/server/platform/logging"
common_mysql "github.com/fleetdm/fleet/v4/server/platform/mysql"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/go-kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -200,7 +200,7 @@ func TestCreatingCertificateAuthorities(t *testing.T) {
require.NoError(t, err)
svc := &Service{
logger: log.NewLogfmtLogger(os.Stdout),
logger: logging.NewLogfmtLogger(os.Stdout),
ds: ds,
authz: authorizer,
digiCertService: digicert.NewService(),
@ -1195,7 +1195,7 @@ func TestUpdatingCertificateAuthorities(t *testing.T) {
require.NoError(t, err)
svc := &Service{
logger: log.NewLogfmtLogger(os.Stdout),
logger: logging.NewLogfmtLogger(os.Stdout),
ds: ds,
authz: authorizer,
digiCertService: digicert.NewService(),

View file

@ -20,6 +20,7 @@ import (
"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/dev_mode"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service/middleware/log"
"github.com/fleetdm/fleet/v4/server/service/middleware/otel"
kitlog "github.com/go-kit/log"
@ -61,7 +62,7 @@ func (e *notFoundError) IsNotFound() bool {
// idpService implements the Okta conditional access IdP functionality.
type idpService struct {
ds fleet.Datastore
logger kitlog.Logger
logger *logging.Logger
certSerialFormat string
}
@ -69,7 +70,7 @@ type idpService struct {
func RegisterIdP(
mux *http.ServeMux,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
fleetConfig *config.FleetConfig,
) error {
if fleetConfig == nil {
@ -78,7 +79,7 @@ func RegisterIdP(
svc := &idpService{
ds: ds,
logger: kitlog.With(logger, "component", "conditional-access-idp"),
logger: logger.With("component", "conditional-access-idp"),
certSerialFormat: fleetConfig.ConditionalAccess.CertSerialFormat,
}
@ -565,7 +566,7 @@ func (s *idpService) buildIdentityProvider(ctx context.Context, serverURL string
ssoURL = ssoURL.JoinPath(idpSSOPath)
// Create kitlog adapter for SAML library
samlLogger := &kitlogAdapter{logger: kitlog.With(s.logger, "component", "saml-idp")}
samlLogger := &kitlogAdapter{logger: s.logger.With("component", "saml-idp")}
// Build IdentityProvider
// Note: SessionProvider is set dynamically in serveSSO based on the authenticated device

View file

@ -15,8 +15,8 @@ import (
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
common_mysql "github.com/fleetdm/fleet/v4/server/platform/mysql"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
)
@ -76,13 +76,13 @@ b1ctZeF7HaWwFdTC8GqWI6zzRFn+YA3f/yYibhowuEypPQeSjlI=
func newTestService() (*idpService, *mock.Store) {
ds := new(mock.Store)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
return &idpService{ds: ds, logger: logger, certSerialFormat: config.CertSerialFormatHex}, ds
}
func newTestServiceWithCertFormat(certFormat string) (*idpService, *mock.Store) {
ds := new(mock.Store)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
return &idpService{ds: ds, logger: logger, certSerialFormat: certFormat}, ds
}
@ -117,7 +117,7 @@ func mockCertAssetsFunc(includeCerts bool) func(context.Context, []fleet.MDMAsse
func TestRegisterIdP(t *testing.T) {
ds := new(mock.Store)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
cfg := &config.FleetConfig{}
ds.AppConfigFunc = mockAppConfigFunc("https://fleet.example.com")
@ -526,7 +526,7 @@ func TestParseCertAndKeyBytes(t *testing.T) {
}
func TestDeviceHealthSessionProvider(t *testing.T) {
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
now := time.Now()
tests := []struct {

View file

@ -3,13 +3,13 @@ package condaccess
import (
"fmt"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
)
// kitlogAdapter adapts kitlog.Logger to saml logger.Interface
// kitlogAdapter adapts to saml logger.Interface
type kitlogAdapter struct {
logger kitlog.Logger
logger *logging.Logger
}
func (k *kitlogAdapter) Printf(format string, v ...interface{}) {

View file

@ -15,9 +15,9 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/assets"
scepdepot "github.com/fleetdm/fleet/v4/server/mdm/scep/depot"
scepserver "github.com/fleetdm/fleet/v4/server/mdm/scep/server"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service/middleware/otel"
"github.com/go-kit/kit/log"
kitlog "github.com/go-kit/log"
"github.com/smallstep/scep"
)
@ -45,7 +45,7 @@ func RegisterSCEP(
mux *http.ServeMux,
scepStorage scepdepot.Depot,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
fleetConfig *config.FleetConfig,
) error {
if fleetConfig == nil {
@ -68,10 +68,10 @@ func RegisterSCEP(
scepService := NewSCEPService(
ds,
signer,
kitlog.With(logger, "component", "conditional-access-scep"),
logger.With("component", "conditional-access-scep"),
)
scepLogger := kitlog.With(logger, "component", "http-conditional-access-scep")
scepLogger := logger.With("component", "http-conditional-access-scep")
e := scepserver.MakeServerEndpoints(scepService)
e.GetEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.GetEndpoint)
e.PostEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.PostEndpoint)

View file

@ -18,8 +18,8 @@ import (
"github.com/fleetdm/fleet/v4/server"
"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-json-experiment/json"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"software.sslmate.com/src/go-pkcs12"
)
@ -36,7 +36,7 @@ const (
)
type Service struct {
logger kitlog.Logger
logger *logging.Logger
timeout time.Duration
}
@ -60,7 +60,7 @@ func WithTimeout(t time.Duration) Opt {
}
// WithLogger sets the logger to use for the service.
func WithLogger(logger kitlog.Logger) Opt {
func WithLogger(logger *logging.Logger) Opt {
return func(s *Service) {
s.logger = logger
}
@ -119,7 +119,7 @@ func (s *Service) populateOpts(opts []Opt) {
s.timeout = defaultTimeout
}
if s.logger == nil {
s.logger = kitlog.NewLogfmtLogger(kitlog.NewSyncWriter(os.Stdout))
s.logger = logging.NewLogfmtLogger(os.Stderr)
}
}

View file

@ -25,9 +25,9 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/assets"
scepdepot "github.com/fleetdm/fleet/v4/server/mdm/scep/depot"
scepserver "github.com/fleetdm/fleet/v4/server/mdm/scep/server"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service/middleware/otel"
"github.com/go-kit/kit/log"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/smallstep/scep"
)
@ -67,7 +67,7 @@ func RegisterSCEP(
mux *http.ServeMux,
scepStorage scepdepot.Depot,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
fleetConfig *config.FleetConfig,
) error {
if fleetConfig == nil {
@ -87,10 +87,10 @@ func RegisterSCEP(
scepService := NewSCEPService(
ds,
signer,
kitlog.With(logger, "component", "host-id-scep"),
logger.With("component", "host-id-scep"),
)
scepLogger := kitlog.With(logger, "component", "http-host-id-scep")
scepLogger := logger.With("component", "http-host-id-scep")
e := scepserver.MakeServerEndpoints(scepService)
e.GetEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.GetEndpoint)
e.PostEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.PostEndpoint)
@ -140,7 +140,7 @@ func hasRenewalExtension(csr *x509.CertificateRequest) bool {
}
// renewalMiddleware handles certificate renewal with proof-of-possession
func renewalMiddleware(ds fleet.Datastore, logger kitlog.Logger, next scepserver.CSRSignerContext) scepserver.CSRSignerContextFunc {
func renewalMiddleware(ds fleet.Datastore, logger *logging.Logger, next scepserver.CSRSignerContext) scepserver.CSRSignerContextFunc {
return func(ctx context.Context, m *scep.CSRReqMessage) (*x509.Certificate, error) {
// Check if this is a renewal request
var renewalData types.RenewalData

View file

@ -25,11 +25,11 @@ import (
mdmtesting "github.com/fleetdm/fleet/v4/server/mdm/testing_utils"
"github.com/fleetdm/fleet/v4/server/mock"
nanodep_mock "github.com/fleetdm/fleet/v4/server/mock/nanodep"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/fleetdm/fleet/v4/server/worker"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
@ -40,7 +40,7 @@ func setupMockDatastorePremiumService(t testing.TB) (*mock.Store, *eeservice.Ser
lic := &fleet.LicenseInfo{Tier: fleet.TierPremium}
ctx := license.NewContext(context.Background(), lic)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
fleetConfig := config.FleetConfig{
MDM: config.MDMConfig{
AppleSCEPCertBytes: eeservice.TestCert,

View file

@ -17,9 +17,9 @@ import (
"github.com/fleetdm/fleet/v4/server/contexts/viewer"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
common_mysql "github.com/fleetdm/fleet/v4/server/platform/mysql"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
)
@ -150,7 +150,7 @@ func TestRequestCertificate(t *testing.T) {
authorizer, err := authz.NewAuthorizer()
require.NoError(t, err)
logger := log.NewLogfmtLogger(os.Stdout)
logger := logging.NewLogfmtLogger(os.Stdout)
svc := &Service{
logger: logger,
ds: ds,

View file

@ -10,8 +10,8 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/android"
apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/storage"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/sso"
kitlog "github.com/go-kit/log"
)
// Service wraps a free Service and implements additional premium functionality on top of it.
@ -19,7 +19,7 @@ type Service struct {
fleet.Service
ds fleet.Datastore
logger kitlog.Logger
logger *logging.Logger
config config.FleetConfig
clock clock.Clock
authz *authz.Authorizer
@ -42,7 +42,7 @@ type Service struct {
func NewService(
svc fleet.Service,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
config config.FleetConfig,
mailService fleet.MailService,
c clock.Clock,

View file

@ -30,7 +30,6 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/nanomdm/mdm"
common_mysql "github.com/fleetdm/fleet/v4/server/platform/mysql"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/go-kit/log"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/google/uuid"
@ -2074,7 +2073,7 @@ func (svc *Service) softwareBatchUpload(
if batchErr != nil {
status = fmt.Sprintf("%s%s", batchSetFailedPrefix, batchErr)
}
logger := log.With(svc.logger,
logger := svc.logger.With(
"request_uuid", requestUUID,
"team_id", teamID,
"payloads", len(payloads),

View file

@ -13,11 +13,10 @@ import (
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/calendar"
"github.com/fleetdm/fleet/v4/server/service/schedule"
"github.com/go-kit/log"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/google/uuid"
)
@ -33,12 +32,12 @@ func NewCalendarSchedule(
ds fleet.Datastore,
distributedLock fleet.Lock,
serverConfig config.CalendarConfig,
logger kitlog.Logger,
logger *logging.Logger,
) (*schedule.Schedule, error) {
const (
name = string(fleet.CronCalendar)
)
logger = kitlog.With(logger, "cron", name)
logger = logger.With("cron", name)
s := schedule.New(
ctx, name, instanceID, serverConfig.Periodicity, ds, ds,
schedule.WithAltLockID("calendar"),
@ -61,7 +60,7 @@ func NewCalendarSchedule(
}
func cronCalendarEvents(ctx context.Context, ds fleet.Datastore, distributedLock fleet.Lock, serverConfig config.CalendarConfig,
logger kitlog.Logger) error {
logger *logging.Logger) error {
appConfig, err := ds.AppConfig(ctx)
if err != nil {
return fmt.Errorf("load app config: %w", err)
@ -108,7 +107,7 @@ func cronCalendarEventsForTeam(
team fleet.Team,
orgName string,
domain string,
logger kitlog.Logger,
logger *logging.Logger,
) error {
if team.Config.Integrations.GoogleCalendar == nil ||
!team.Config.Integrations.GoogleCalendar.Enable {
@ -128,7 +127,7 @@ func cronCalendarEventsForTeam(
return nil
}
logger = kitlog.With(logger, "team_id", team.ID)
logger = logger.With("team_id", team.ID)
//
// NOTEs:
@ -208,7 +207,7 @@ func processCalendarFailingHosts(
calendarConfig *calendar.Config,
orgName string,
hosts []fleet.HostPolicyMembershipData,
logger kitlog.Logger,
logger *logging.Logger,
) {
hosts = filterHostsWithSameEmail(hosts)
@ -224,7 +223,7 @@ func processCalendarFailingHosts(
defer wg.Done()
for host := range hostsCh {
logger := log.With(logger, "host_id", host.HostID)
logger := logger.With("host_id", host.HostID)
hostCalendarEvent, calendarEvent, err := ds.GetHostCalendarEventByEmail(ctx, host.Email)
@ -320,7 +319,7 @@ func processFailingHostExistingCalendarEvent(
host fleet.HostPolicyMembershipData,
policyIDtoPolicy *sync.Map,
calendarConfig *calendar.Config,
logger kitlog.Logger,
logger *logging.Logger,
) error {
// Try to acquire the lock. Lock is needed to ensure calendar callback is not processed for this event at the same time.
@ -512,7 +511,7 @@ func processFailingHostExistingCalendarEvent(
}
func getBodyTag(ctx context.Context, ds fleet.Datastore, host fleet.HostPolicyMembershipData, policyIDtoPolicy *sync.Map,
logger kitlog.Logger) string {
logger *logging.Logger) string {
var updatedBodyTag string
policyIDs := strings.Split(host.FailingPolicyIDs, ",")
if len(policyIDs) == 1 && policyIDs[0] != "" {
@ -586,7 +585,7 @@ func processFailingHostCreateCalendarEvent(
orgName string,
host fleet.HostPolicyMembershipData,
policyIDtoPolicy *sync.Map,
logger kitlog.Logger,
logger *logging.Logger,
) error {
calendarEvent, err := attemptCreatingEventOnUserCalendar(ctx, ds, orgName, host, userCalendar, policyIDtoPolicy, logger)
if err != nil {
@ -608,7 +607,7 @@ func attemptCreatingEventOnUserCalendar(
host fleet.HostPolicyMembershipData,
userCalendar fleet.UserCalendar,
policyIDtoPolicy *sync.Map,
logger kitlog.Logger,
logger *logging.Logger,
) (*fleet.CalendarEvent, error) {
year, month, today := time.Now().Date()
preferredDate := getPreferredCalendarEventDate(year, month, today)
@ -668,7 +667,7 @@ func removeCalendarEventsFromPassingHosts(
ds fleet.Datastore,
calendarConfig *calendar.Config,
hosts []fleet.HostPolicyMembershipData,
logger kitlog.Logger,
logger *logging.Logger,
) {
hostIDsByEmail := make(map[string][]uint)
for _, host := range hosts {
@ -731,7 +730,7 @@ func removeCalendarEventsFromPassingHosts(
func logHostsWithoutAssociatedEmail(
domain string,
hosts []fleet.HostPolicyMembershipData,
logger kitlog.Logger,
logger *logging.Logger,
) {
if len(hosts) == 0 {
return
@ -768,7 +767,7 @@ func isHostOnline(ctx context.Context, ds fleet.Datastore, hostID uint) (bool, e
}
}
func cronCalendarEventsCleanup(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger) error {
func cronCalendarEventsCleanup(ctx context.Context, ds fleet.Datastore, logger *logging.Logger) error {
appConfig, err := ds.AppConfig(ctx)
if err != nil {
return fmt.Errorf("load app config: %w", err)
@ -833,7 +832,7 @@ func deleteAllCalendarEvents(
ds fleet.Datastore,
calendarConfig *calendar.Config,
teamID *uint,
logger kitlog.Logger,
logger *logging.Logger,
) error {
calendarEvents, err := ds.ListCalendarEvents(ctx, teamID)
if err != nil {
@ -845,7 +844,7 @@ func deleteAllCalendarEvents(
func deleteCalendarEventsInParallel(
ctx context.Context, ds fleet.Datastore, calendarConfig *calendar.Config, calendarEvents []*fleet.CalendarEvent,
logger kitlog.Logger,
logger *logging.Logger,
) {
if len(calendarEvents) > 0 {
calendarEventCh := make(chan *fleet.CalendarEvent)
@ -879,7 +878,7 @@ func cleanupTeamCalendarEvents(
ds fleet.Datastore,
calendarConfig *calendar.Config,
team fleet.Team,
logger kitlog.Logger,
logger *logging.Logger,
) error {
teamFeatureEnabled := team.Config.Integrations.GoogleCalendar != nil && team.Config.Integrations.GoogleCalendar.Enable

View file

@ -16,9 +16,9 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/redis_lock"
kitlog "github.com/go-kit/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -129,7 +129,7 @@ func TestEventForDifferentHost(t *testing.T) {
t.Parallel()
ds := new(mock.Store)
ctx := context.Background()
logger := kitlog.With(kitlog.NewLogfmtLogger(os.Stdout))
logger := logging.NewLogfmtLogger(os.Stdout)
ds.AppConfigFunc = func(ctx context.Context) (*fleet.AppConfig, error) {
return &fleet.AppConfig{
Integrations: fleet.Integrations{
@ -208,7 +208,7 @@ func TestEventForDifferentHost(t *testing.T) {
func TestCalendarEventsMultipleHosts(t *testing.T) {
ds := new(mock.Store)
ctx := context.Background()
logger := kitlog.With(kitlog.NewLogfmtLogger(os.Stdout))
logger := logging.NewLogfmtLogger(os.Stdout)
t.Cleanup(func() {
calendar.ClearMockEvents()
calendar.ClearMockChannels()
@ -403,11 +403,11 @@ func (n notFoundErr) Error() string {
func TestCalendarEvents1KHosts(t *testing.T) {
ds := new(mock.Store)
ctx := context.Background()
var logger kitlog.Logger
var logger *logging.Logger
if os.Getenv("CALENDAR_TEST_LOGGING") != "" {
logger = kitlog.With(kitlog.NewLogfmtLogger(os.Stdout))
logger = logging.NewLogfmtLogger(os.Stdout)
} else {
logger = kitlog.NewNopLogger()
logger = logging.NewNopLogger()
}
t.Cleanup(func() {
calendar.ClearMockEvents()
@ -715,7 +715,7 @@ func TestCalendarEvents1KHosts(t *testing.T) {
func TestEventBody(t *testing.T) {
ds := new(mock.Store)
ctx := context.Background()
logger := kitlog.With(kitlog.NewLogfmtLogger(os.Stdout))
logger := logging.NewLogfmtLogger(os.Stdout)
t.Cleanup(
func() {
calendar.ClearMockEvents()

View file

@ -17,7 +17,7 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/mock"
nanodep_mock "github.com/fleetdm/fleet/v4/server/mock/nanodep"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/stretchr/testify/require"
)
@ -67,7 +67,7 @@ func TestDEPClient(t *testing.T) {
}))
defer srv.Close()
logger := log.NewNopLogger()
logger := logging.NewNopLogger()
ds := new(mock.Store)
appCfg := fleet.AppConfig{}

View file

@ -4,7 +4,7 @@ package health
import (
"net/http"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
)
// Checker returns an error indicating if a service is in an unhealthy state.
@ -17,7 +17,7 @@ type Checker interface {
// Handler responds with either:
// 200 OK if the server can successfully communicate with its backends or
// 500 if any of the backends are reporting an issue.
func Handler(logger log.Logger, allCheckers map[string]Checker) http.HandlerFunc {
func Handler(logger *logging.Logger, allCheckers map[string]Checker) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
checkers := make(map[string]Checker)
checks, ok := r.URL.Query()["check"]
@ -49,11 +49,11 @@ func Handler(logger log.Logger, allCheckers map[string]Checker) http.HandlerFunc
// CheckHealth checks multiple checkers returning false if any of them fail.
// CheckHealth logs the reason a checker fails.
func CheckHealth(logger log.Logger, checkers map[string]Checker) bool {
func CheckHealth(logger *logging.Logger, checkers map[string]Checker) bool {
healthy := true
for name, hc := range checkers {
if err := hc.HealthCheck(); err != nil {
log.With(logger, "component", "healthz").Log("err", err, "health-checker", name)
logger.With("component", "healthz").Log("err", err, "health-checker", name)
healthy = false
continue
}

View file

@ -6,7 +6,7 @@ import (
"net/http/httptest"
"testing"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -17,13 +17,13 @@ func TestCheckHealth(t *testing.T) {
"pass": Nop(),
}
healthy := CheckHealth(log.NewNopLogger(), checkers)
healthy := CheckHealth(logging.NewNopLogger(), checkers)
require.False(t, healthy)
checkers = map[string]Checker{
"pass": Nop(),
}
healthy = CheckHealth(log.NewNopLogger(), checkers)
healthy = CheckHealth(logging.NewNopLogger(), checkers)
require.True(t, healthy)
}
@ -34,7 +34,7 @@ func (c fail) HealthCheck() error {
}
func TestHealthzHandler(t *testing.T) {
logger := log.NewNopLogger()
logger := logging.NewNopLogger()
failCheck := healthcheckFunc(func() error {
return errors.New("health check failed")
})

View file

@ -17,7 +17,7 @@ import (
"github.com/fleetdm/fleet/v4/server/contexts/host"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/health"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/kolide/launcher/pkg/service"
"github.com/osquery/osquery-go/plugin/distributed"
"github.com/osquery/osquery-go/plugin/logger"
@ -26,7 +26,7 @@ import (
// launcherWrapper wraps the TLS interface.
type launcherWrapper struct {
tls fleet.OsqueryService
logger log.Logger
logger *logging.Logger
healthCheckers map[string]health.Checker
}

View file

@ -7,8 +7,8 @@ import (
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/health"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service/mock"
"github.com/go-kit/log"
"github.com/kolide/launcher/pkg/service"
"github.com/osquery/osquery-go/plugin/distributed"
"github.com/stretchr/testify/assert"
@ -93,7 +93,7 @@ func newTestService(t *testing.T) (*launcherWrapper, *mock.TLSService) {
tls := newTLSService(t)
launcher := &launcherWrapper{
tls: tls,
logger: log.NewNopLogger(),
logger: logging.NewNopLogger(),
healthCheckers: map[string]health.Checker{
"noop": health.Nop(),
},

View file

@ -6,12 +6,12 @@ import (
"strings"
kithttp "github.com/go-kit/kit/transport/http"
"github.com/go-kit/log"
launcher "github.com/kolide/launcher/pkg/service"
grpc "google.golang.org/grpc"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/health"
"github.com/fleetdm/fleet/v4/server/platform/logging"
)
// Handler extends the grpc.Server, providing Handler that allows us to serve
@ -23,7 +23,7 @@ type Handler struct {
// New creates a gRPC server to handle remote requests from launcher.
func New(
tls fleet.OsqueryService,
logger log.Logger,
logger *logging.Logger,
grpcServer *grpc.Server,
healthCheckers map[string]health.Checker,
) *Handler {

View file

@ -2,16 +2,16 @@ package logging
import (
nanodep_log "github.com/fleetdm/fleet/v4/server/mdm/nanodep/log"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
)
// NanoDEPLogger is a logger adapter for nanodep.
type NanoDEPLogger struct {
logger kitlog.Logger
logger *logging.Logger
}
func NewNanoDEPLogger(logger kitlog.Logger) *NanoDEPLogger {
func NewNanoDEPLogger(logger *logging.Logger) *NanoDEPLogger {
return &NanoDEPLogger{
logger: logger,
}
@ -26,8 +26,7 @@ func (l *NanoDEPLogger) Debug(keyvals ...interface{}) {
}
func (l *NanoDEPLogger) With(keyvals ...interface{}) nanodep_log.Logger {
newLogger := kitlog.With(l.logger, keyvals...)
return &NanoDEPLogger{
logger: newLogger,
logger: l.logger.With(keyvals...),
}
}

View file

@ -11,7 +11,7 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/assets"
depclient "github.com/fleetdm/fleet/v4/server/mdm/nanodep/client"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/storage"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
)
// SetABMTokenMetadata uses the provided ABM token to fetch the associated
@ -23,7 +23,7 @@ func SetABMTokenMetadata(
abmToken *fleet.ABMToken,
depStorage storage.AllDEPStorage,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
renewal bool,
) error {
decryptedToken, err := assets.ABMToken(ctx, ds, abmToken.OrganizationName)
@ -42,7 +42,7 @@ func SetDecryptedABMTokenMetadata(
decryptedToken *depclient.OAuth1Tokens,
depStorage storage.AllDEPStorage,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
renewal bool,
) error {
depClient := NewDEPClient(depStorage, ds, logger)

View file

@ -28,7 +28,7 @@ import (
depclient "github.com/fleetdm/fleet/v4/server/mdm/nanodep/client"
nanodep_storage "github.com/fleetdm/fleet/v4/server/mdm/nanodep/storage"
depsync "github.com/fleetdm/fleet/v4/server/mdm/nanodep/sync"
kitlog "github.com/go-kit/log"
platformlogging "github.com/fleetdm/fleet/v4/server/platform/logging"
)
const (
@ -92,7 +92,7 @@ type DEPService struct {
ds fleet.Datastore
depStorage nanodep_storage.AllDEPStorage
depClient *godep.Client
logger kitlog.Logger
logger *platformlogging.Logger
}
// getDefaultProfile returns a godep.Profile with default values set.
@ -443,7 +443,7 @@ func (d *DEPService) EnsureCustomSetupAssistantIfExists(ctx context.Context, tea
}
func (d *DEPService) RunAssigner(ctx context.Context) error {
syncerLogger := logging.NewNanoDEPLogger(kitlog.With(d.logger, "component", "nanodep-syncer"))
syncerLogger := logging.NewNanoDEPLogger(d.logger.With("component", "nanodep-syncer"))
teams, err := d.ds.ListTeams(
ctx, fleet.TeamFilter{
User: &fleet.User{
@ -572,7 +572,7 @@ func (d *DEPService) AssignMDMAppleServiceDiscoveryURL(ctx context.Context, toke
func NewDEPService(
ds fleet.Datastore,
depStorage nanodep_storage.AllDEPStorage,
logger kitlog.Logger,
logger *platformlogging.Logger,
) *DEPService {
depSvc := &DEPService{
depStorage: depStorage,
@ -747,15 +747,15 @@ func (d *DEPService) processDeviceResponse(
n, err := d.ds.IngestMDMAppleDevicesFromDEPSync(ctx, addedDevicesSlice, abmTokenID, macOSTeam, iosTeam, ipadTeam)
switch {
case err != nil:
level.Error(kitlog.With(d.logger)).Log("err", err)
level.Error(d.logger).Log("err", err)
ctxerr.Handle(ctx, err)
case n > 0:
level.Info(kitlog.With(d.logger)).Log("msg", fmt.Sprintf("added %d new mdm device(s) to pending hosts", n))
level.Info(d.logger).Log("msg", fmt.Sprintf("added %d new mdm device(s) to pending hosts", n))
case n == 0:
level.Debug(kitlog.With(d.logger)).Log("msg", "no DEP hosts to add")
level.Debug(d.logger).Log("msg", "no DEP hosts to add")
}
level.Info(kitlog.With(d.logger)).Log("msg", "devices to assign DEP profiles",
level.Info(d.logger).Log("msg", "devices to assign DEP profiles",
"to_add", strings.Join(addedSerials, ", "),
"to_remove", strings.Join(deletedSerials, ", "),
"to_modify", strings.Join(modifiedSerials, ", "),
@ -801,7 +801,7 @@ func (d *DEPService) processDeviceResponse(
level.Info(d.logger).Log("msg", "preparing to upsert DEP assignment for existing host", "serial", existingHost.HardwareSerial, "host_id", existingHost.ID)
md, ok := modifiedDevices[existingHost.HardwareSerial]
if !ok {
level.Error(kitlog.With(d.logger)).Log("msg",
level.Error(d.logger).Log("msg",
"serial coming from ABM is in the database, but it's not in the list of modified devices", "serial",
existingHost.HardwareSerial)
continue
@ -851,7 +851,7 @@ func (d *DEPService) processDeviceResponse(
continue
}
logger := kitlog.With(d.logger, "profile_uuid", profUUID)
logger := d.logger.With("profile_uuid", profUUID)
skipSerials, assignSerials, err := d.ds.ScreenDEPAssignProfileSerialsForCooldown(ctx, serials)
if err != nil {
@ -916,7 +916,7 @@ func (d *DEPService) processDeviceResponse(
}
if len(skippedSerials) > 0 {
level.Info(kitlog.With(d.logger)).Log("msg", "found devices that already have the right profile, skipping assignment", "serials",
level.Info(d.logger).Log("msg", "found devices that already have the right profile, skipping assignment", "serials",
fmt.Sprintf("%s", skippedSerials))
}
@ -970,7 +970,7 @@ func logCountsForResults(deviceResults map[string]string) (out []interface{}) {
// storage that will flag the ABM token's terms expired field and the
// AppConfig's AppleBMTermsExpired field whenever the status of the terms
// changes.
func NewDEPClient(storage godep.ClientStorage, updater fleet.ABMTermsUpdater, logger kitlog.Logger) *godep.Client {
func NewDEPClient(storage godep.ClientStorage, updater fleet.ABMTermsUpdater, logger *platformlogging.Logger) *godep.Client {
return godep.NewClient(storage, fleethttp.NewClient(), godep.WithAfterHook(func(ctx context.Context, reqErr error) error {
// to check for ABM terms expired, we must have an ABM token organization
// name and NOT a raw ABM token in the context (as the presence of a raw
@ -1400,7 +1400,8 @@ func (pb *ProfileBimap) add(wantedProfile, currentProfile *fleet.MDMAppleProfile
// NewActivityFunc is the function signature for creating a new activity.
type NewActivityFunc func(ctx context.Context, user *fleet.User, activity fleet.ActivityDetails) error
func IOSiPadOSRefetch(ctx context.Context, ds fleet.Datastore, commander *MDMAppleCommander, logger kitlog.Logger, newActivityFn NewActivityFunc) error {
func IOSiPadOSRefetch(ctx context.Context, ds fleet.Datastore, commander *MDMAppleCommander, logger *platformlogging.Logger,
newActivityFn NewActivityFunc) error {
appCfg, err := ds.AppConfig(ctx)
if err != nil {
return ctxerr.Wrap(ctx, err, "fetching app config")
@ -1513,7 +1514,8 @@ func IOSiPadOSRefetch(ctx context.Context, ds fleet.Datastore, commander *MDMApp
// turnOffMDMIfAPNSFailed checks if the error is an APNSDeliveryError and turns off MDM for the failed devices.
// Returns a boolean value to indicate whether or not MDM was turned off.
func turnOffMDMIfAPNSFailed(ctx context.Context, ds fleet.Datastore, err error, logger kitlog.Logger, newActivityFn NewActivityFunc) (bool, error) {
func turnOffMDMIfAPNSFailed(ctx context.Context, ds fleet.Datastore, err error, logger *platformlogging.Logger, newActivityFn NewActivityFunc) (bool,
error) {
var e *APNSDeliveryError
if !errors.As(err, &e) {
return false, nil
@ -1578,7 +1580,7 @@ func GenerateOTAEnrollmentProfileMobileconfig(orgName, fleetURL, enrollSecret, i
return profileBuf.Bytes(), nil
}
func IOSiPadOSRevive(ctx context.Context, ds fleet.Datastore, commander *MDMAppleCommander, logger kitlog.Logger) error {
func IOSiPadOSRevive(ctx context.Context, ds fleet.Datastore, commander *MDMAppleCommander, logger *platformlogging.Logger) error {
appCfg, err := ds.AppConfig(ctx)
if err != nil {
return ctxerr.Wrap(ctx, err, "fetching app config")

View file

@ -16,8 +16,8 @@ import (
apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
nanodep_client "github.com/fleetdm/fleet/v4/server/mdm/nanodep/client"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
)
@ -41,7 +41,7 @@ func TestDEPService_RunAssigner(t *testing.T) {
mysql.SetTestABMAssets(t, ds, abmTokenOrgName)
logger := log.NewNopLogger()
logger := logging.NewNopLogger()
return apple_mdm.NewDEPService(ds, depStorage, logger)
}

View file

@ -15,7 +15,7 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/mock"
nanodep_mock "github.com/fleetdm/fleet/v4/server/mock/nanodep"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/micromdm/plist"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -25,7 +25,7 @@ func TestDEPService(t *testing.T) {
t.Run("EnsureDefaultSetupAssistant", func(t *testing.T) {
ds := new(mock.Store)
ctx := context.Background()
logger := log.NewNopLogger()
logger := logging.NewNopLogger()
depStorage := new(nanodep_mock.Storage)
depSvc := NewDEPService(ds, depStorage, logger)
defaultProfile := depSvc.getDefaultProfile()

View file

@ -3,50 +3,45 @@ package logging
import (
"context"
"log/slog"
"slices"
kitlog "github.com/go-kit/log"
)
// KitlogAdapter wraps a slog.Logger to implement the kitlog.Logger interface.
// Logger wraps a slog.Logger to implement the kitlog.Logger interface.
// This allows gradual migration from kitlog to slog by providing a drop-in
// replacement that uses slog under the hood.
type KitlogAdapter struct {
type Logger struct {
logger *slog.Logger
// attrs holds any attributes added via With()
attrs []any
}
// NewKitlogAdapter creates a new adapter that implements kitlog.Logger
// using the provided slog.Logger.
func NewKitlogAdapter(logger *slog.Logger) kitlog.Logger {
return &KitlogAdapter{
// NewLogger creates a new adapter that implements kitlog.Logger
// using the provided slog.Logger. It returns *Logger to preserve
// type information, allowing callers to access SlogLogger() directly.
func NewLogger(logger *slog.Logger) *Logger {
return &Logger{
logger: logger,
}
}
// Log implements kitlog.Logger. It converts key-value pairs to slog attributes
// and logs at the appropriate level based on the "level" key if present.
func (a *KitlogAdapter) Log(keyvals ...any) error {
if len(keyvals) == 0 && len(a.attrs) == 0 {
func (a *Logger) Log(keyvals ...any) error {
if len(keyvals) == 0 {
return nil
}
// Combine pre-set attrs with new keyvals
allKeyvals := slices.Concat(a.attrs, keyvals)
// Extract level and message from keyvals
level := slog.LevelInfo
msg := ""
attrs := make([]slog.Attr, 0, len(allKeyvals)/2)
attrs := make([]slog.Attr, 0, len(keyvals)/2)
for i := 0; i < len(allKeyvals)-1; i += 2 {
key, ok := allKeyvals[i].(string)
for i := 0; i < len(keyvals)-1; i += 2 {
key, ok := keyvals[i].(string)
if !ok {
// If key isn't a string, skip this pair
continue
}
val := allKeyvals[i+1]
val := keyvals[i+1]
switch key {
case "level":
@ -68,10 +63,11 @@ func (a *KitlogAdapter) Log(keyvals ...any) error {
}
// With returns a new logger with the given key-value pairs added to every log.
func (a *KitlogAdapter) With(keyvals ...any) kitlog.Logger {
return &KitlogAdapter{
logger: a.logger,
attrs: slices.Concat(a.attrs, keyvals),
// It returns *Logger (not kitlog.Logger) to preserve type information,
// allowing callers to access SlogLogger() without type assertions.
func (a *Logger) With(keyvals ...any) *Logger {
return &Logger{
logger: a.logger.With(keyvals...),
}
}
@ -104,5 +100,11 @@ func kitlogLevelToSlog(val any) slog.Level {
}
}
// Ensure KitlogAdapter implements kitlog.Logger at compile time.
var _ kitlog.Logger = (*KitlogAdapter)(nil)
// SlogLogger returns the underlying *slog.Logger backing this adapter.
// It lets call sites that have already migrated use structured slog
// logging directly, while unmigrated code keeps using the kitlog-style
// Log/With methods on the same *Logger.
func (a *Logger) SlogLogger() *slog.Logger {
	return a.logger
}
// Ensure Logger implements kitlog.Logger at compile time.
var _ kitlog.Logger = (*Logger)(nil)

View file

@ -17,7 +17,7 @@ func newTestAdapter(t *testing.T) (*testutils.TestHandler, kitlog.Logger) {
t.Helper()
handler := testutils.NewTestHandler()
slogLogger := slog.New(handler)
return handler, NewKitlogAdapter(slogLogger)
return handler, NewLogger(slogLogger)
}
func TestKitlogAdapter(t *testing.T) {
@ -42,8 +42,8 @@ func TestKitlogAdapter(t *testing.T) {
t.Parallel()
handler, adapter := newTestAdapter(t)
kitlogAdapter, ok := adapter.(*KitlogAdapter)
require.True(t, ok, "adapter should be *KitlogAdapter")
kitlogAdapter, ok := adapter.(*Logger)
require.True(t, ok, "adapter should be *Logger")
contextLogger := kitlogAdapter.With("component", "test-component")
err := contextLogger.Log("msg", "message with context")

View file

@ -148,3 +148,32 @@ func (h *OtelTracingHandler) WithGroup(name string) slog.Handler {
// Ensure OtelTracingHandler implements slog.Handler at compile time.
var _ slog.Handler = (*OtelTracingHandler)(nil)
// DiscardHandler is a slog.Handler that discards all log records.
type DiscardHandler struct{}
func (DiscardHandler) Enabled(context.Context, slog.Level) bool { return false }
func (DiscardHandler) Handle(context.Context, slog.Record) error { return nil }
func (d DiscardHandler) WithAttrs([]slog.Attr) slog.Handler { return d }
func (d DiscardHandler) WithGroup(string) slog.Handler { return d }
// Ensure DiscardHandler implements slog.Handler at compile time.
var _ slog.Handler = DiscardHandler{}
// NewNopLogger returns a *Logger whose output is discarded entirely.
// Prefer it over kitlog.NewNopLogger() in tests: callers keep the
// concrete *Logger type and with it access to the underlying slog logger.
func NewNopLogger() *Logger {
	nop := slog.New(DiscardHandler{})
	return NewLogger(nop)
}
// NewLogfmtLogger creates a *Logger that writes text-formatted records to
// output. It is a drop-in replacement for kitlog.NewLogfmtLogger().
// NOTE(review): Debug is set so low-severity records are not filtered,
// mirroring kitlog's unleveled behavior — confirm against Options.
func NewLogfmtLogger(output io.Writer) *Logger {
	opts := Options{Output: output, Debug: true}
	return NewLogger(NewSlogLogger(opts))
}
// NewJSONLogger creates a *Logger that writes JSON-formatted records to
// output. It is a drop-in replacement for kitlog.NewJSONLogger().
// NOTE(review): Debug is set so low-severity records are not filtered,
// mirroring kitlog's unleveled behavior — confirm against Options.
func NewJSONLogger(output io.Writer) *Logger {
	opts := Options{Output: output, JSON: true, Debug: true}
	return NewLogger(NewSlogLogger(opts))
}

View file

@ -10,7 +10,7 @@ import (
"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/datastore/redis"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
redigo "github.com/gomodule/redigo/redis"
)
@ -18,14 +18,14 @@ import (
type redisQueryResults struct {
pool fleet.RedisPool
duplicateResults bool
logger log.Logger
logger *logging.Logger
}
var _ fleet.QueryResultStore = &redisQueryResults{}
// NewRedisQueryResults creates a new Redis implementation of the
// QueryResultStore interface using the provided Redis connection pool.
func NewRedisQueryResults(pool fleet.RedisPool, duplicateResults bool, logger log.Logger) *redisQueryResults {
func NewRedisQueryResults(pool fleet.RedisPool, duplicateResults bool, logger *logging.Logger) *redisQueryResults {
return &redisQueryResults{
pool: pool,
duplicateResults: duplicateResults,
@ -86,7 +86,7 @@ func writeOrDone(ctx context.Context, ch chan<- interface{}, item interface{}) b
// connection over the provided channel. This effectively allows a select
// statement to run on conn.Receive() (by selecting on outChan that is
// passed into this function)
func receiveMessages(ctx context.Context, conn *redigo.PubSubConn, outChan chan<- interface{}, logger log.Logger) {
func receiveMessages(ctx context.Context, conn *redigo.PubSubConn, outChan chan<- any, logger *logging.Logger) {
defer close(outChan)
for {
@ -136,7 +136,7 @@ func (r *redisQueryResults) ReadChannel(ctx context.Context, query fleet.Distrib
var wg sync.WaitGroup
logger := log.With(r.logger, "campaignID", query.ID)
logger := r.logger.With("campaignID", query.ID)
// Run a separate goroutine feeding redis messages into msgChannel.
wg.Add(+1)

View file

@ -4,11 +4,11 @@ import (
"testing"
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
)
func SetupRedisForTest(t *testing.T, cluster, readReplica bool) *redisQueryResults {
const dupResults = false
pool := redistest.SetupRedis(t, "zz", cluster, false, readReplica)
return NewRedisQueryResults(pool, dupResults, log.NewNopLogger())
return NewRedisQueryResults(pool, dupResults, logging.NewNopLogger())
}

View file

@ -31,6 +31,7 @@ import (
"github.com/fleetdm/fleet/v4/pkg/optjson"
"github.com/fleetdm/fleet/v4/server"
platform_http "github.com/fleetdm/fleet/v4/server/platform/http"
platformlogging "github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/authz"
"github.com/fleetdm/fleet/v4/server/config"
@ -57,7 +58,6 @@ import (
"github.com/fleetdm/fleet/v4/server/platform/endpointer"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/variables"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/google/uuid"
"github.com/micromdm/plist"
@ -3321,7 +3321,7 @@ func (svc *Service) MDMAppleDisableFileVaultAndEscrow(ctx context.Context, teamI
type MDMAppleCheckinAndCommandService struct {
ds fleet.Datastore
logger kitlog.Logger
logger *platformlogging.Logger
commander *apple_mdm.MDMAppleCommander
vppInstaller fleet.AppleMDMVPPInstaller
mdmLifecycle *mdmlifecycle.HostLifecycle
@ -3335,7 +3335,7 @@ func NewMDMAppleCheckinAndCommandService(
commander *apple_mdm.MDMAppleCommander,
vppInstaller fleet.AppleMDMVPPInstaller,
isPremium bool,
logger kitlog.Logger,
logger *platformlogging.Logger,
keyValueStore fleet.KeyValueStore,
) *MDMAppleCheckinAndCommandService {
mdmLifecycle := mdmlifecycle.New(ds, logger, newActivity)
@ -4065,7 +4065,7 @@ func (svc *MDMAppleCheckinAndCommandService) handleScheduledUpdates(
host *fleet.Host,
softwares []fleet.Software,
) error {
logger := kitlog.With(svc.logger,
logger := svc.logger.With(
"method", "handle_scheduled_updates",
"host_id", host.ID,
)
@ -4144,7 +4144,7 @@ func (svc *MDMAppleCheckinAndCommandService) handleScheduledUpdates(
// 1. Filter out software that is not within the configured update window in the host timezone.
var softwaresWithinUpdateSchedule []fleet.SoftwareAutoUpdateSchedule
for _, softwareWithAutoUpdateSchedule := range softwaresWithAutoUpdateSchedule {
logger := kitlog.With(logger,
logger := logger.With(
"software_title_id", softwareWithAutoUpdateSchedule.TitleID,
"team_id", softwareWithAutoUpdateSchedule.TeamID,
"update_window_start", softwareWithAutoUpdateSchedule.AutoUpdateStartTime,
@ -4201,7 +4201,7 @@ func (svc *MDMAppleCheckinAndCommandService) handleScheduledUpdates(
)
continue
}
logger := kitlog.With(logger,
logger := logger.With(
"name", softwareTitle.Name,
"bundle_identifier", softwareTitle.BundleIdentifier,
"source", softwareTitle.Source,
@ -4386,7 +4386,7 @@ func (svc *MDMAppleCheckinAndCommandService) handleScheduledUpdates(
if softwareTitle.BundleIdentifier != nil {
bundleIdentifier = *softwareTitle.BundleIdentifier
}
logger := kitlog.With(logger,
logger := logger.With(
"software_title_id", softwareTitle.ID,
"team_id", host.TeamID,
"adam_id", softwareTitle.AppStoreApp.AdamID,
@ -4728,7 +4728,7 @@ func mdmAppleDeliveryStatusFromCommandStatus(cmdStatus string) *fleet.MDMDeliver
// This profile will be installed to all hosts in the team (or "no team",) but it
// will only be used by hosts that have a fleetd installation without an enroll
// secret and fleet URL (mainly DEP enrolled hosts).
func ensureFleetProfiles(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger, signingCertDER []byte) error {
func ensureFleetProfiles(ctx context.Context, ds fleet.Datastore, logger *platformlogging.Logger, signingCertDER []byte) error {
appCfg, err := ds.AppConfig(ctx)
if err != nil {
return ctxerr.Wrap(ctx, err, "fetching app config")
@ -4810,7 +4810,7 @@ func SendPushesToPendingDevices(
ctx context.Context,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *platformlogging.Logger,
) error {
enrollmentIDs, err := ds.GetEnrollmentIDsWithPendingMDMAppleCommands(ctx)
if err != nil {
@ -4839,7 +4839,7 @@ func ReconcileAppleDeclarations(
ctx context.Context,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *platformlogging.Logger,
) error {
appConfig, err := ds.AppConfig(ctx)
if err != nil {
@ -4909,7 +4909,7 @@ func ReconcileAppleProfiles(
ctx context.Context,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *platformlogging.Logger,
) error {
appConfig, err := ds.AppConfig(ctx)
if err != nil {
@ -5343,7 +5343,7 @@ func ReconcileAppleProfiles(
}
func findProfilesWithSecrets(
logger kitlog.Logger,
logger *platformlogging.Logger,
installTargets map[string]*cmdTarget,
profileContents map[string]mobileconfig.Mobileconfig,
) (map[string]struct{}, error) {
@ -5369,7 +5369,7 @@ func preprocessProfileContents(
ds fleet.Datastore,
scepConfig fleet.SCEPConfigService,
digiCertService fleet.DigiCertService,
logger kitlog.Logger,
logger *platformlogging.Logger,
targets map[string]*cmdTarget,
profileContents map[string]mobileconfig.Mobileconfig,
hostProfilesToInstallMap map[hostProfileUUID]*fleet.MDMAppleBulkUpsertHostProfilePayload,
@ -6116,7 +6116,7 @@ const maxCertsRenewalPerRun = 100
func RenewSCEPCertificates(
ctx context.Context,
logger kitlog.Logger,
logger *platformlogging.Logger,
ds fleet.Datastore,
config *config.FleetConfig,
commander *apple_mdm.MDMAppleCommander,
@ -6301,7 +6301,7 @@ func renewSCEPWithProfile(
ctx context.Context,
ds fleet.Datastore,
commander *apple_mdm.MDMAppleCommander,
logger kitlog.Logger,
logger *platformlogging.Logger,
assocs []fleet.SCEPIdentityAssociation,
profile []byte,
) error {
@ -6339,10 +6339,10 @@ func renewSCEPWithProfile(
// [1]: https://developer.apple.com/documentation/devicemanagement/declarative_management_checkin
type MDMAppleDDMService struct {
ds fleet.Datastore
logger kitlog.Logger
logger *platformlogging.Logger
}
func NewMDMAppleDDMService(ds fleet.Datastore, logger kitlog.Logger) *MDMAppleDDMService {
func NewMDMAppleDDMService(ds fleet.Datastore, logger *platformlogging.Logger) *MDMAppleDDMService {
return &MDMAppleDDMService{
ds: ds,
logger: logger,
@ -7269,7 +7269,8 @@ func (svc *Service) MDMAppleProcessOTAEnrollment(
// EnsureMDMAppleServiceDiscovery checks if the service discovery URL is set up correctly with Apple
// and assigns it if necessary.
func EnsureMDMAppleServiceDiscovery(ctx context.Context, ds fleet.Datastore, depStorage storage.AllDEPStorage, logger kitlog.Logger, urlPrefix string) error {
func EnsureMDMAppleServiceDiscovery(ctx context.Context, ds fleet.Datastore, depStorage storage.AllDEPStorage, logger *platformlogging.Logger,
urlPrefix string) error {
depSvc := apple_mdm.NewDEPService(ds, depStorage, logger)
ac, err := ds.AppConfig(ctx)

View file

@ -11,8 +11,8 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mdm/nanomdm/mdm"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
)
@ -20,7 +20,7 @@ import (
func TestDeclarativeManagement_DeclarationItems(t *testing.T) {
ctx := t.Context()
ds := mysql.CreateMySQLDS(t)
logger := log.NewLogfmtLogger(os.Stdout)
logger := logging.NewLogfmtLogger(os.Stdout)
ddmService := MDMAppleDDMService{
ds: ds,
logger: logger,

View file

@ -49,10 +49,10 @@ import (
mdmmock "github.com/fleetdm/fleet/v4/server/mock/mdm"
nanodep_mock "github.com/fleetdm/fleet/v4/server/mock/nanodep"
scep_mock "github.com/fleetdm/fleet/v4/server/mock/scep"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/redis_key_value"
"github.com/fleetdm/fleet/v4/server/test"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
micromdm "github.com/micromdm/micromdm/mdm/mdm"
@ -104,7 +104,7 @@ func setupAppleMDMService(t *testing.T, license *fleet.LicenseInfo) (fleet.Servi
mdmStorage,
mdmStorage,
pushFactory,
NewNanoMDMLogger(kitlog.NewJSONLogger(os.Stdout)),
NewNanoMDMLogger(logging.NewJSONLogger(os.Stdout)),
)
opts := &TestServerOpts{
@ -1202,12 +1202,12 @@ func TestMDMCommandAuthz(t *testing.T) {
func TestMDMAuthenticateManualEnrollment(t *testing.T) {
ds := new(mock.Store)
mdmLifecycle := mdmlifecycle.New(ds, kitlog.NewNopLogger(), newActivity)
mdmLifecycle := mdmlifecycle.New(ds, logging.NewNopLogger(), newActivity)
svc := MDMAppleCheckinAndCommandService{
ds: ds,
mdmLifecycle: mdmLifecycle,
keyValueStore: redis_key_value.New(redistest.NopRedis()),
logger: kitlog.NewNopLogger(),
logger: logging.NewNopLogger(),
}
ctx := context.Background()
uuid, serial, model := "ABC-DEF-GHI", "XYZABC", "MacBookPro 16,1"
@ -1252,12 +1252,12 @@ func TestMDMAuthenticateManualEnrollment(t *testing.T) {
func TestMDMAuthenticateADE(t *testing.T) {
ds := new(mock.Store)
mdmLifecycle := mdmlifecycle.New(ds, kitlog.NewNopLogger(), newActivity)
mdmLifecycle := mdmlifecycle.New(ds, logging.NewNopLogger(), newActivity)
svc := MDMAppleCheckinAndCommandService{
ds: ds,
mdmLifecycle: mdmLifecycle,
keyValueStore: redis_key_value.New(redistest.NopRedis()),
logger: kitlog.NewNopLogger(),
logger: logging.NewNopLogger(),
}
ctx := context.Background()
uuid, serial, model := "ABC-DEF-GHI", "XYZABC", "MacBookPro 16,1"
@ -1302,11 +1302,11 @@ func TestMDMAuthenticateADE(t *testing.T) {
func TestMDMAuthenticateSCEPRenewal(t *testing.T) {
ds := new(mock.Store)
mdmLifecycle := mdmlifecycle.New(ds, kitlog.NewNopLogger(), newActivity)
mdmLifecycle := mdmlifecycle.New(ds, logging.NewNopLogger(), newActivity)
svc := MDMAppleCheckinAndCommandService{
ds: ds,
mdmLifecycle: mdmLifecycle,
logger: kitlog.NewNopLogger(),
logger: logging.NewNopLogger(),
}
ctx := context.Background()
uuid, serial, model := "ABC-DEF-GHI", "XYZABC", "MacBookPro 16,1"
@ -1416,15 +1416,15 @@ func TestMDMTokenUpdate(t *testing.T) {
mdmStorage,
mdmStorage,
pushFactory,
NewNanoMDMLogger(kitlog.NewJSONLogger(os.Stdout)),
NewNanoMDMLogger(logging.NewJSONLogger(os.Stdout)),
)
cmdr := apple_mdm.NewMDMAppleCommander(mdmStorage, pusher)
mdmLifecycle := mdmlifecycle.New(ds, kitlog.NewNopLogger(), newActivity)
mdmLifecycle := mdmlifecycle.New(ds, logging.NewNopLogger(), newActivity)
svc := MDMAppleCheckinAndCommandService{
ds: ds,
mdmLifecycle: mdmLifecycle,
commander: cmdr,
logger: kitlog.NewNopLogger(),
logger: logging.NewNopLogger(),
}
uuid, serial, model, wantTeamID := "ABC-DEF-GHI", "XYZABC", "MacBookPro 16,1", uint(12)
@ -1604,15 +1604,15 @@ func TestMDMTokenUpdateIOS(t *testing.T) {
mdmStorage,
mdmStorage,
pushFactory,
NewNanoMDMLogger(kitlog.NewJSONLogger(os.Stdout)),
NewNanoMDMLogger(logging.NewJSONLogger(os.Stdout)),
)
cmdr := apple_mdm.NewMDMAppleCommander(mdmStorage, pusher)
mdmLifecycle := mdmlifecycle.New(ds, kitlog.NewNopLogger(), newActivity)
mdmLifecycle := mdmlifecycle.New(ds, logging.NewNopLogger(), newActivity)
svc := MDMAppleCheckinAndCommandService{
ds: ds,
mdmLifecycle: mdmLifecycle,
commander: cmdr,
logger: kitlog.NewNopLogger(),
logger: logging.NewNopLogger(),
}
uuid, serial, model, wantTeamID := "ABC-DEF-GHI", "XYZABC", "MacBookPro 16,1", uint(12)
@ -1763,11 +1763,11 @@ func TestMDMTokenUpdateIOS(t *testing.T) {
func TestMDMCheckout(t *testing.T) {
ds := new(mock.Store)
mdmLifecycle := mdmlifecycle.New(ds, kitlog.NewNopLogger(), newActivity)
mdmLifecycle := mdmlifecycle.New(ds, logging.NewNopLogger(), newActivity)
svc := MDMAppleCheckinAndCommandService{
ds: ds,
mdmLifecycle: mdmLifecycle,
logger: kitlog.NewNopLogger(),
logger: logging.NewNopLogger(),
}
ctx := context.Background()
uuid, serial, installedFromDEP, displayName, platform := "ABC-DEF-GHI", "XYZABC", true, "Test's MacBook", "darwin"
@ -1908,7 +1908,7 @@ func TestMDMCommandAndReportResultsProfileHandling(t *testing.T) {
for i, c := range cases {
t.Run(fmt.Sprintf("%s%s-%d", c.requestType, c.status, i), func(t *testing.T) {
ds := new(mock.Store)
svc := MDMAppleCheckinAndCommandService{ds: ds, logger: kitlog.NewNopLogger()}
svc := MDMAppleCheckinAndCommandService{ds: ds, logger: logging.NewNopLogger()}
ds.GetMDMAppleCommandRequestTypeFunc = func(ctx context.Context, targetCmd string) (string, error) {
require.Equal(t, commandUUID, targetCmd)
return c.requestType, nil
@ -2675,7 +2675,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
mdmStorage,
mdmStorage,
pushFactory,
NewNanoMDMLogger(kitlog.NewNopLogger()),
NewNanoMDMLogger(logging.NewNopLogger()),
)
mdmConfig := config.MDMConfig{
AppleSCEPCert: "./testdata/server.pem",
@ -3023,7 +3023,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
failedCount++
require.Len(t, payload, 0)
}
err := ReconcileAppleProfiles(ctx, ds, cmdr, kitlog.NewNopLogger())
err := ReconcileAppleProfiles(ctx, ds, cmdr, logging.NewNopLogger())
require.NoError(t, err)
require.Equal(t, 1, failedCount)
checkAndReset(t, true, &ds.ListMDMAppleProfilesToInstallAndRemoveFuncInvoked)
@ -3070,7 +3070,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
}
enqueueFailForOp = fleet.MDMOperationTypeRemove
err := ReconcileAppleProfiles(ctx, ds, cmdr, kitlog.NewNopLogger())
err := ReconcileAppleProfiles(ctx, ds, cmdr, logging.NewNopLogger())
require.NoError(t, err)
require.Equal(t, 1, failedCount)
checkAndReset(t, true, &ds.ListMDMAppleProfilesToInstallAndRemoveFuncInvoked)
@ -3143,7 +3143,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
}
enqueueFailForOp = fleet.MDMOperationTypeInstall
err := ReconcileAppleProfiles(ctx, ds, cmdr, kitlog.NewNopLogger())
err := ReconcileAppleProfiles(ctx, ds, cmdr, logging.NewNopLogger())
require.NoError(t, err)
require.Equal(t, 1, failedCount)
checkAndReset(t, true, &ds.ListMDMAppleProfilesToInstallAndRemoveFuncInvoked)
@ -3317,7 +3317,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
contents1 = originalContents1
expectedContents1 = originalExpectedContents1
})
err := ReconcileAppleProfiles(ctx, ds, cmdr, kitlog.NewNopLogger())
err := ReconcileAppleProfiles(ctx, ds, cmdr, logging.NewNopLogger())
require.NoError(t, err)
assert.Equal(t, 2, upsertCount)
// checkAndReset(t, true, &ds.GetAllCertificateAuthoritiesFuncInvoked)
@ -3345,7 +3345,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
ds.GetHostEmailsFunc = func(ctx context.Context, hostUUID string, source string) ([]string, error) {
return nil, errors.New("GetHostEmailsFuncError")
}
err := ReconcileAppleProfiles(ctx, ds, cmdr, kitlog.NewNopLogger())
err := ReconcileAppleProfiles(ctx, ds, cmdr, logging.NewNopLogger())
assert.ErrorContains(t, err, "GetHostEmailsFuncError")
// checkAndReset(t, true, &ds.GetAllCertificateAuthoritiesFuncInvoked)
checkAndReset(t, true, &ds.ListMDMAppleProfilesToInstallAndRemoveFuncInvoked)
@ -3407,7 +3407,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
hostUUIDs = append(hostUUIDs, p.HostUUID)
}
err := ReconcileAppleProfiles(ctx, ds, cmdr, kitlog.NewNopLogger())
err := ReconcileAppleProfiles(ctx, ds, cmdr, logging.NewNopLogger())
require.NoError(t, err)
assert.Empty(t, hostUUIDs, "all host+profile combinations should be updated")
require.Equal(t, 5, failedCount, "number of profiles with bad content")
@ -3423,7 +3423,7 @@ func TestMDMAppleReconcileAppleProfiles(t *testing.T) {
func TestPreprocessProfileContents(t *testing.T) {
ctx := context.Background()
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
appCfg := &fleet.AppConfig{}
appCfg.ServerSettings.ServerURL = "https://test.example.com"
appCfg.MDM.EnabledAndConfigured = true
@ -3918,7 +3918,7 @@ func TestEnsureFleetdConfig(t *testing.T) {
testError := errors.New("test error")
testURL := "https://example.com"
testTeamName := "test-team"
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
mdmConfig := config.MDMConfig{
AppleSCEPCert: "./testdata/server.pem",
AppleSCEPKey: "./testdata/server.key",
@ -4394,9 +4394,10 @@ func generateCertWithAPNsTopic() ([]byte, []byte, error) {
return certPEM, keyPEM, nil
}
func setupTest(t *testing.T) (context.Context, kitlog.Logger, *mock.Store, *config.FleetConfig, *mdmmock.MDMAppleStore, *apple_mdm.MDMAppleCommander) {
func setupTest(t *testing.T) (context.Context, *logging.Logger, *mock.Store, *config.FleetConfig, *mdmmock.MDMAppleStore,
*apple_mdm.MDMAppleCommander) {
ctx := context.Background()
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
cfg := config.TestConfig()
ds := new(mock.Store)
mdmStorage := &mdmmock.MDMAppleStore{}
@ -4792,7 +4793,7 @@ func TestMDMCommandAndReportResultsIOSIPadOSRefetch(t *testing.T) {
lostModeCommandUUID := uuid.NewString()
ds := new(mock.Store)
svc := MDMAppleCheckinAndCommandService{ds: ds, logger: kitlog.NewNopLogger()}
svc := MDMAppleCheckinAndCommandService{ds: ds, logger: logging.NewNopLogger()}
ds.HostByIdentifierFunc = func(ctx context.Context, identifier string) (*fleet.Host, error) {
return &fleet.Host{
@ -5106,7 +5107,7 @@ func TestNeedsOSUpdateForDEPEnrollment(t *testing.T) {
return tt.platform, &tt.appleOSUpdateSettings, tt.returnedErr
}
svc := &Service{ds: ds, logger: kitlog.NewNopLogger()}
svc := &Service{ds: ds, logger: logging.NewNopLogger()}
t.Run(tt.name, func(t *testing.T) {
result, err := svc.needsOSUpdateForDEPEnrollment(ctx, tt.appleMachineInfo)
@ -5453,7 +5454,7 @@ func TestCheckMDMAppleEnrollmentWithMinimumOSVersion(t *testing.T) {
func TestPreprocessProfileContentsEndUserIDP(t *testing.T) {
ctx := context.Background()
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
appCfg := &fleet.AppConfig{}
appCfg.ServerSettings.ServerURL = "https://test.example.com"
appCfg.MDM.EnabledAndConfigured = true

View file

@ -14,7 +14,7 @@ import (
"github.com/fleetdm/fleet/v4/ee/server/calendar"
"github.com/fleetdm/fleet/v4/server/config"
"github.com/fleetdm/fleet/v4/server/fleet"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
)
@ -57,18 +57,18 @@ type PolicyLiteWithMeta struct {
mu sync.Mutex
}
func CreateUserCalendarFromConfig(ctx context.Context, config *Config, logger kitlog.Logger) fleet.UserCalendar {
func CreateUserCalendarFromConfig(ctx context.Context, config *Config, logger *logging.Logger) fleet.UserCalendar {
googleCalendarConfig := calendar.GoogleCalendarConfig{
Context: ctx,
IntegrationConfig: &config.GoogleCalendarIntegration,
ServerURL: config.ServerURL,
Logger: kitlog.With(logger, "component", "google_calendar"),
Logger: logger.With("component", "google_calendar"),
}
return calendar.NewGoogleCalendar(&googleCalendarConfig)
}
func GenerateCalendarEventBody(ctx context.Context, ds fleet.Datastore, orgName string, host fleet.HostPolicyMembershipData,
policyIDtoPolicy *sync.Map, conflict bool, logger kitlog.Logger,
policyIDtoPolicy *sync.Map, conflict bool, logger *logging.Logger,
) (body string, tag string) {
description, resolution, tag := getCalendarEventDescriptionAndResolution(ctx, ds, orgName, host, policyIDtoPolicy, logger)
@ -89,7 +89,7 @@ Please leave your device on and connected to power.
}
func getCalendarEventDescriptionAndResolution(ctx context.Context, ds fleet.Datastore, orgName string, host fleet.HostPolicyMembershipData,
policyIDtoPolicy *sync.Map, logger kitlog.Logger,
policyIDtoPolicy *sync.Map, logger *logging.Logger,
) (description string, resolution string, tag string) {
getDefaultDescription := func() string {
return fmt.Sprintf(`%s %s`, orgName, fleet.CalendarDefaultDescription)

View file

@ -37,8 +37,8 @@ import (
"github.com/fleetdm/fleet/v4/server/service/middleware/otel"
"github.com/docker/go-units"
"github.com/fleetdm/fleet/v4/server/platform/logging"
kithttp "github.com/go-kit/kit/transport/http"
kitlog "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
@ -103,7 +103,7 @@ func setCarveStoreInRequestContext(carveStore fleet.CarveStore) kithttp.RequestF
func MakeHandler(
svc fleet.Service,
config config.FleetConfig,
logger kitlog.Logger,
logger *logging.Logger,
limitStore throttled.GCRAStore,
redisPool fleet.RedisPool,
carveStore fleet.CarveStore,
@ -278,7 +278,7 @@ const (
)
func attachFleetAPIRoutes(r *mux.Router, svc fleet.Service, config config.FleetConfig,
logger kitlog.Logger, limitStore throttled.GCRAStore, redisPool fleet.RedisPool, opts []kithttp.ServerOption,
logger *logging.Logger, limitStore throttled.GCRAStore, redisPool fleet.RedisPool, opts []kithttp.ServerOption,
extra extraHandlerOpts,
) {
apiVersions := []string{"v1", "2022-04"}
@ -1108,7 +1108,7 @@ func attachFleetAPIRoutes(r *mux.Router, svc fleet.Service, config config.FleetC
// WithSetup is an http middleware that checks if setup procedures have been completed.
// If setup hasn't been completed it serves the API with a setup middleware.
// If the server is already configured, the default API handler is exposed.
func WithSetup(svc fleet.Service, logger kitlog.Logger, next http.Handler) http.HandlerFunc {
func WithSetup(svc fleet.Service, logger *logging.Logger, next http.Handler) http.HandlerFunc {
rxOsquery := regexp.MustCompile(`^/api/[^/]+/osquery`)
return func(w http.ResponseWriter, r *http.Request) {
configRouter := http.NewServeMux()
@ -1143,7 +1143,7 @@ func WithSetup(svc fleet.Service, logger kitlog.Logger, next http.Handler) http.
// RedirectLoginToSetup detects if the setup endpoint should be used. If setup is required it redirect all
// frontend urls to /setup, otherwise the frontend router is used.
func RedirectLoginToSetup(svc fleet.Service, logger kitlog.Logger, next http.Handler, urlPrefix string) http.HandlerFunc {
func RedirectLoginToSetup(svc fleet.Service, logger *logging.Logger, next http.Handler, urlPrefix string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
redirect := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/setup" {
@ -1171,7 +1171,7 @@ func RedirectLoginToSetup(svc fleet.Service, logger kitlog.Logger, next http.Han
// RedirectSetupToLogin forces the /setup path to be redirected to login. This middleware is used after
// the app has been setup.
func RedirectSetupToLogin(svc fleet.Service, logger kitlog.Logger, next http.Handler, urlPrefix string) http.HandlerFunc {
func RedirectSetupToLogin(svc fleet.Service, logger *logging.Logger, next http.Handler, urlPrefix string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/setup" {
newURL := r.URL
@ -1190,7 +1190,7 @@ func RegisterAppleMDMProtocolServices(
scepConfig config.MDMConfig,
mdmStorage fleet.MDMAppleStore,
scepStorage scep_depot.Depot,
logger kitlog.Logger,
logger *logging.Logger,
checkinAndCommandService nanomdm_service.CheckinAndCommandService,
ddmService nanomdm_service.DeclarativeManagement,
profileService nanomdm_service.ProfileService,
@ -1211,11 +1211,11 @@ func RegisterAppleMDMProtocolServices(
func registerMDMServiceDiscovery(
mux *http.ServeMux,
logger kitlog.Logger,
logger *logging.Logger,
serverURLPrefix string,
fleetConfig config.FleetConfig,
) error {
serviceDiscoveryLogger := kitlog.With(logger, "component", "mdm-apple-service-discovery")
serviceDiscoveryLogger := logger.With("component", "mdm-apple-service-discovery")
fullMDMEnrollmentURL := fmt.Sprintf("%s%s", serverURLPrefix, apple_mdm.AccountDrivenEnrollPath)
serviceDiscoveryHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serviceDiscoveryLogger.Log("msg", "serving MDM service discovery response", "url", fullMDMEnrollmentURL)
@ -1238,7 +1238,7 @@ func registerSCEP(
scepConfig config.MDMConfig,
scepStorage scep_depot.Depot,
mdmStorage fleet.MDMAppleStore,
logger kitlog.Logger,
logger *logging.Logger,
fleetConfig config.FleetConfig,
) error {
var signer scepserver.CSRSignerContext = scepserver.SignCSRAdapter(scep_depot.NewSigner(
@ -1256,10 +1256,10 @@ func registerSCEP(
scepService := NewSCEPService(
mdmStorage,
signer,
kitlog.With(logger, "component", "mdm-apple-scep"),
logger.With("component", "mdm-apple-scep"),
)
scepLogger := kitlog.With(logger, "component", "http-mdm-apple-scep")
scepLogger := logger.With("component", "http-mdm-apple-scep")
e := scepserver.MakeServerEndpoints(scepService)
e.GetEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.GetEndpoint)
e.PostEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.PostEndpoint)
@ -1271,7 +1271,7 @@ func registerSCEP(
func RegisterSCEPProxy(
rootMux *http.ServeMux,
ds fleet.Datastore,
logger kitlog.Logger,
logger *logging.Logger,
timeout *time.Duration,
fleetConfig *config.FleetConfig,
) error {
@ -1280,10 +1280,10 @@ func RegisterSCEPProxy(
}
scepService := eeservice.NewSCEPProxyService(
ds,
kitlog.With(logger, "component", "scep-proxy-service"),
logger.With("component", "scep-proxy-service"),
timeout,
)
scepLogger := kitlog.With(logger, "component", "http-scep-proxy")
scepLogger := logger.With("component", "http-scep-proxy")
e := scepserver.MakeServerEndpointsWithIdentifier(scepService)
e.GetEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.GetEndpoint)
e.PostEndpoint = scepserver.EndpointLoggingMiddleware(scepLogger)(e.PostEndpoint)
@ -1296,10 +1296,10 @@ func RegisterSCEPProxy(
// NanoMDMLogger is a logger adapter for nanomdm.
type NanoMDMLogger struct {
logger kitlog.Logger
logger *logging.Logger
}
func NewNanoMDMLogger(logger kitlog.Logger) *NanoMDMLogger {
func NewNanoMDMLogger(logger *logging.Logger) *NanoMDMLogger {
return &NanoMDMLogger{
logger: logger,
}
@ -1314,9 +1314,8 @@ func (l *NanoMDMLogger) Debug(keyvals ...interface{}) {
}
func (l *NanoMDMLogger) With(keyvals ...interface{}) nanomdm_log.Logger {
newLogger := kitlog.With(l.logger, keyvals...)
return &NanoMDMLogger{
logger: newLogger,
logger: l.logger.With(keyvals...),
}
}
@ -1327,11 +1326,11 @@ func registerMDM(
checkinAndCommandService nanomdm_service.CheckinAndCommandService,
ddmService nanomdm_service.DeclarativeManagement,
profileService nanomdm_service.ProfileService,
logger kitlog.Logger,
logger *logging.Logger,
fleetConfig config.FleetConfig,
) error {
certVerifier := mdmcrypto.NewSCEPVerifier(mdmStorage)
mdmLogger := NewNanoMDMLogger(kitlog.With(logger, "component", "http-mdm-apple-mdm"))
mdmLogger := NewNanoMDMLogger(logger.With("component", "http-mdm-apple-mdm"))
// As usual, handlers are applied from bottom to top:
// 1. Extract and verify MDM signature.
@ -1364,7 +1363,7 @@ func registerMDM(
return nil
}
func WithMDMEnrollmentMiddleware(svc fleet.Service, logger kitlog.Logger, next http.Handler) http.HandlerFunc {
func WithMDMEnrollmentMiddleware(svc fleet.Service, logger *logging.Logger, next http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/mdm/sso" && r.URL.Path != "/account_driven_enroll/sso" {
// TODO: redirects for non-SSO config web url?

View file

@ -16,8 +16,8 @@ import (
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/endpointer"
"github.com/fleetdm/fleet/v4/server/platform/logging"
kithttp "github.com/go-kit/kit/transport/http"
kitlog "github.com/go-kit/log"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@ -31,7 +31,7 @@ func TestAPIRoutesConflicts(t *testing.T) {
svc, _ := newTestService(t, ds, nil, nil)
limitStore, _ := memstore.New(0)
cfg := config.TestConfig()
h := MakeHandler(svc, cfg, kitlog.NewNopLogger(), limitStore, nil, nil, nil)
h := MakeHandler(svc, cfg, logging.NewNopLogger(), limitStore, nil, nil, nil)
router := h.(*mux.Router)
type testCase struct {
@ -85,7 +85,7 @@ func TestAPIRoutesMetrics(t *testing.T) {
svc, _ := newTestService(t, ds, nil, nil)
limitStore, _ := memstore.New(0)
h := MakeHandler(svc, config.TestConfig(), kitlog.NewNopLogger(), limitStore, nil, nil, nil)
h := MakeHandler(svc, config.TestConfig(), logging.NewNopLogger(), limitStore, nil, nil, nil)
router := h.(*mux.Router)
// replace all handlers with mocks, and collect the requests to make to each

View file

@ -31,9 +31,9 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/apple/mobileconfig"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/tokenpki"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/test"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/smallstep/pkcs7"
"github.com/stretchr/testify/assert"
@ -3054,7 +3054,7 @@ func TestBulkOperationFilterValidation(t *testing.T) {
func TestSetDiskEncryptionNotifications(t *testing.T) {
ds := new(mock.Store)
ctx := context.Background()
svc := &Service{ds: ds, logger: kitlog.NewNopLogger()}
svc := &Service{ds: ds, logger: logging.NewNopLogger()}
tests := []struct {
name string

View file

@ -50,6 +50,7 @@ import (
"github.com/fleetdm/fleet/v4/server/live_query/live_query_mock"
"github.com/fleetdm/fleet/v4/server/mdm"
maintained_apps "github.com/fleetdm/fleet/v4/server/mdm/maintainedapps"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/policies"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/pubsub"
@ -59,8 +60,6 @@ import (
"github.com/fleetdm/fleet/v4/server/service/redis_lock"
"github.com/fleetdm/fleet/v4/server/service/schedule"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/go-kit/log"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
@ -114,16 +113,16 @@ func (s *integrationEnterpriseTestSuite) SetupSuite() {
Pool: s.redisPool,
Rs: pubsub.NewInmemQueryResults(),
Lq: s.lq,
Logger: log.NewLogfmtLogger(os.Stdout),
Logger: logging.NewLogfmtLogger(os.Stdout),
EnableCachedDS: true,
StartCronSchedules: []TestNewScheduleFunc{
func(ctx context.Context, ds fleet.Datastore) fleet.NewCronScheduleFunc {
return func() (fleet.CronSchedule, error) {
// We set 24-hour interval so that it only runs when triggered.
var err error
cronLog := log.NewJSONLogger(os.Stdout)
cronLog := logging.NewJSONLogger(os.Stdout)
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
cronLog = kitlog.NewNopLogger()
cronLog = logging.NewNopLogger()
}
calendarSchedule, err = cron.NewCalendarSchedule(
ctx, s.T().Name(), s.ds, redis_lock.NewLock(s.redisPool), config.CalendarConfig{Periodicity: 24 * time.Hour},
@ -139,7 +138,7 @@ func (s *integrationEnterpriseTestSuite) SetupSuite() {
DBConns: s.dbConns,
}
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
config.Logger = kitlog.NewNopLogger()
config.Logger = logging.NewNopLogger()
}
users, server := RunServerForTestsWithDS(s.T(), s.ds, &config)
s.server = server
@ -2920,7 +2919,7 @@ func (s *integrationEnterpriseTestSuite) TestNoTeamFailingPolicyWebhookTrigger()
}
// Trigger the webhook automation with a custom sendFunc that captures the call
err = policies.TriggerFailingPoliciesAutomation(ctx, s.ds, kitlog.NewNopLogger(), failingPolicySet,
err = policies.TriggerFailingPoliciesAutomation(ctx, s.ds, logging.NewNopLogger(), failingPolicySet,
func(pol *fleet.Policy, cfg policies.FailingPolicyAutomationConfig) error {
webhookCalled = true
capturedPolicy = pol
@ -12466,7 +12465,7 @@ func (s *integrationEnterpriseTestSuite) TestSoftwareInstallerUploadDownloadAndD
}
s.uploadSoftwareInstaller(t, payload, http.StatusOK, "")
logger := kitlog.NewLogfmtLogger(os.Stderr)
logger := logging.NewLogfmtLogger(os.Stderr)
// Run the migration when nothing is to be done
err = eeservice.UninstallSoftwareMigration(context.Background(), s.ds, s.softwareInstallStore, logger)
@ -19642,7 +19641,7 @@ func (s *integrationEnterpriseTestSuite) TestUpgradeCodesFromMaintainedApps() {
err = detailQueries["software_windows"].DirectIngestFunc(
context.Background(),
kitlog.NewNopLogger(),
logging.NewNopLogger(),
&fleet.Host{ID: host.ID},
s.ds,
rows,

View file

@ -19,9 +19,8 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/s3"
"github.com/fleetdm/fleet/v4/server/fleet"
software_mock "github.com/fleetdm/fleet/v4/server/mock/software"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/go-kit/log"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -54,13 +53,13 @@ func (s *integrationInstallTestSuite) SetupSuite() {
License: &fleet.LicenseInfo{
Tier: fleet.TierPremium,
},
Logger: log.NewLogfmtLogger(os.Stdout),
Logger: logging.NewLogfmtLogger(os.Stdout),
EnableCachedDS: true,
SoftwareInstallStore: softwareInstallStore,
FleetConfig: &fleetConfig,
}
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
installConfig.Logger = kitlog.NewNopLogger()
installConfig.Logger = logging.NewNopLogger()
}
users, server := RunServerForTestsWithDS(s.T(), s.ds, &installConfig)
s.server = server

View file

@ -19,9 +19,9 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/live_query/live_query_mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/pubsub"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
@ -61,7 +61,7 @@ func (s *liveQueriesTestSuite) SetupSuite() {
opts := &TestServerOpts{Lq: lq, Rs: rs}
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
opts.Logger = kitlog.NewNopLogger()
opts.Logger = logging.NewNopLogger()
}
users, server := RunServerForTestsWithDS(s.T(), s.ds, opts)
s.server = server

View file

@ -7,8 +7,8 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"strings"
"testing"
"time"
@ -17,10 +17,10 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
microsoft_mdm "github.com/fleetdm/fleet/v4/server/mdm/microsoft"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/platform/logging/testutils"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/contract"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
@ -36,15 +36,14 @@ type integrationLoggerTestSuite struct {
withServer
suite.Suite
buf *bytes.Buffer
handler *testutils.TestHandler
}
func (s *integrationLoggerTestSuite) SetupSuite() {
s.withDS.SetupSuite("integrationLoggerTestSuite")
s.buf = new(bytes.Buffer)
logger := log.NewJSONLogger(s.buf)
logger = level.NewFilter(logger, level.AllowDebug())
s.handler = testutils.NewTestHandler()
logger := logging.NewLogger(slog.New(s.handler))
redisPool := redistest.SetupRedis(s.T(), "zz", false, false, false)
users, server := RunServerForTestsWithDS(s.T(), s.ds, &TestServerOpts{
@ -59,7 +58,7 @@ func (s *integrationLoggerTestSuite) SetupSuite() {
}
func (s *integrationLoggerTestSuite) TearDownTest() {
s.buf.Reset()
s.handler.Clear()
}
func (s *integrationLoggerTestSuite) TestLogger() {
@ -77,33 +76,30 @@ func (s *integrationLoggerTestSuite) TestLogger() {
var createResp createQueryResponse
s.DoJSON("POST", "/api/latest/fleet/queries", params, http.StatusOK, &createResp)
logs := s.buf.String()
parts := strings.Split(strings.TrimSpace(logs), "\n")
assert.Len(t, parts, 3)
for i, part := range parts {
kv := make(map[string]string)
err := json.Unmarshal([]byte(part), &kv)
require.NoError(t, err)
records := s.handler.Records()
require.Len(t, records, 3)
for i, rec := range records {
attrs := testutils.RecordAttrs(&rec)
assert.NotEqual(t, "", kv["took"])
assert.Contains(t, attrs, "took")
switch i {
case 0:
assert.Equal(t, "info", kv["level"])
assert.Equal(t, "POST", kv["method"])
assert.Equal(t, "/api/latest/fleet/login", kv["uri"])
assert.Equal(t, slog.LevelInfo, rec.Level)
assert.Equal(t, "POST", attrs["method"])
assert.Equal(t, "/api/latest/fleet/login", attrs["uri"])
case 1:
assert.Equal(t, "debug", kv["level"])
assert.Equal(t, "GET", kv["method"])
assert.Equal(t, "/api/latest/fleet/config", kv["uri"])
assert.Equal(t, "admin1@example.com", kv["user"])
assert.Equal(t, slog.LevelDebug, rec.Level)
assert.Equal(t, "GET", attrs["method"])
assert.Equal(t, "/api/latest/fleet/config", attrs["uri"])
assert.Equal(t, "admin1@example.com", attrs["user"])
case 2:
assert.Equal(t, "debug", kv["level"])
assert.Equal(t, "POST", kv["method"])
assert.Equal(t, "/api/latest/fleet/queries", kv["uri"])
assert.Equal(t, "admin1@example.com", kv["user"])
assert.Equal(t, "somequery", kv["name"])
assert.Equal(t, "select 1 from osquery;", kv["sql"])
assert.Equal(t, slog.LevelDebug, rec.Level)
assert.Equal(t, "POST", attrs["method"])
assert.Equal(t, "/api/latest/fleet/queries", attrs["uri"])
assert.Equal(t, "admin1@example.com", attrs["user"])
assert.Equal(t, "somequery", attrs["name"])
assert.Equal(t, "select 1 from osquery;", attrs["sql"])
default:
t.Fail()
}
@ -113,36 +109,38 @@ func (s *integrationLoggerTestSuite) TestLogger() {
func (s *integrationLoggerTestSuite) TestLoggerLogin() {
t := s.T()
type logEntry struct {
type expectedAttr struct {
key string
val string
val any
}
testCases := []struct {
loginRequest contract.LoginRequest
expectedStatus int
expectedLogs []logEntry
expectedLevel slog.Level
expectedAttrs []expectedAttr
}{
{
loginRequest: contract.LoginRequest{Email: testUsers["admin1"].Email, Password: testUsers["admin1"].PlaintextPassword},
expectedStatus: http.StatusOK,
expectedLogs: []logEntry{{"email", testUsers["admin1"].Email}},
expectedLevel: slog.LevelInfo,
expectedAttrs: []expectedAttr{{"email", testUsers["admin1"].Email}},
},
{
loginRequest: contract.LoginRequest{Email: testUsers["admin1"].Email, Password: "n074v411dp455w02d"},
expectedStatus: http.StatusUnauthorized,
expectedLogs: []logEntry{
expectedLevel: slog.LevelInfo,
expectedAttrs: []expectedAttr{
{"email", testUsers["admin1"].Email},
{"level", "info"},
{"internal", "invalid password"},
},
},
{
loginRequest: contract.LoginRequest{Email: "h4x0r@3x4mp13.c0m", Password: "n074v411dp455w02d"},
expectedStatus: http.StatusUnauthorized,
expectedLogs: []logEntry{
expectedLevel: slog.LevelInfo,
expectedAttrs: []expectedAttr{
{"email", "h4x0r@3x4mp13.c0m"},
{"level", "info"},
{"internal", "user not found"},
},
},
@ -150,18 +148,18 @@ func (s *integrationLoggerTestSuite) TestLoggerLogin() {
var resp loginResponse
for _, tt := range testCases {
s.DoJSON("POST", "/api/latest/fleet/login", tt.loginRequest, tt.expectedStatus, &resp)
logString := s.buf.String()
parts := strings.Split(strings.TrimSpace(logString), "\n")
require.Len(t, parts, 1)
logData := make(map[string]string)
require.NoError(t, json.Unmarshal([]byte(parts[0]), &logData))
require.NotContains(t, logData, "user") // logger context is set to skip user
records := s.handler.Records()
require.Len(t, records, 1)
assert.Equal(t, tt.expectedLevel, records[0].Level)
for _, e := range tt.expectedLogs {
assert.Equal(t, e.val, logData[e.key], fmt.Sprintf("%+v", tt.expectedLogs))
attrs := testutils.RecordAttrs(&records[0])
require.NotContains(t, attrs, "user") // logger context is set to skip user
for _, e := range tt.expectedAttrs {
assert.Equal(t, e.val, attrs[e.key], fmt.Sprintf("%+v", tt.expectedAttrs))
}
s.buf.Reset()
s.handler.Clear()
}
}
@ -200,10 +198,21 @@ func (s *integrationLoggerTestSuite) TestOsqueryEndpointsLogErrors() {
assert.Equal(t, "json decoder error", jsn.Errs[0]["reason"])
require.NotEmpty(t, jsn.UUID)
logString := s.buf.String()
assert.Contains(t, logString, `invalid character '}' looking for beginning of value","level":"info","path":"/api/osquery/log"`)
assert.Contains(t, logString, `"uuid":"`+jsn.UUID)
assert.Contains(t, logString, `"took":`)
records := s.handler.Records()
require.NotEmpty(t, records)
var foundErrRecord bool
for i := range records {
attrs := testutils.RecordAttrs(&records[i])
if attrs["uuid"] == jsn.UUID {
foundErrRecord = true
assert.Equal(t, slog.LevelInfo, records[i].Level)
assert.Equal(t, "/api/osquery/log", attrs["path"])
assert.Contains(t, fmt.Sprint(attrs["internal"]), `invalid character '}' looking for beginning of value`)
assert.Contains(t, attrs, "took")
break
}
}
require.True(t, foundErrRecord, "expected a log record with uuid %s", jsn.UUID)
}
func (s *integrationLoggerTestSuite) TestSubmitLog() {
@ -223,6 +232,22 @@ func (s *integrationLoggerTestSuite) TestSubmitLog() {
})
require.NoError(t, err)
assertIPAddrLogged := func(records []slog.Record) {
t.Helper()
var ipAddrCount, xForIPAddrCount int
for i := range records {
attrs := testutils.RecordAttrs(&records[i])
if _, ok := attrs["ip_addr"]; ok {
ipAddrCount++
}
if _, ok := attrs["x_for_ip_addr"]; ok {
xForIPAddrCount++
}
}
assert.Equal(t, 1, ipAddrCount)
assert.Equal(t, 1, xForIPAddrCount)
}
// submit status logs
req := submitLogsRequest{
NodeKey: *h.NodeKey,
@ -232,10 +257,8 @@ func (s *integrationLoggerTestSuite) TestSubmitLog() {
res := submitLogsResponse{}
s.DoJSON("POST", "/api/osquery/log", req, http.StatusOK, &res)
logString := s.buf.String()
assert.Equal(t, 1, strings.Count(logString, `"ip_addr"`))
assert.Equal(t, 1, strings.Count(logString, "x_for_ip_addr"))
s.buf.Reset()
assertIPAddrLogged(s.handler.Records())
s.handler.Clear()
// submit results logs
req = submitLogsRequest{
@ -246,10 +269,8 @@ func (s *integrationLoggerTestSuite) TestSubmitLog() {
res = submitLogsResponse{}
s.DoJSON("POST", "/api/osquery/log", req, http.StatusOK, &res)
logString = s.buf.String()
assert.Equal(t, 1, strings.Count(logString, `"ip_addr"`))
assert.Equal(t, 1, strings.Count(logString, "x_for_ip_addr"))
s.buf.Reset()
assertIPAddrLogged(s.handler.Records())
s.handler.Clear()
// submit invalid type logs
req = submitLogsRequest{
@ -260,7 +281,7 @@ func (s *integrationLoggerTestSuite) TestSubmitLog() {
var errRes map[string]string
s.DoJSON("POST", "/api/osquery/log", req, http.StatusInternalServerError, &errRes)
assert.Contains(t, errRes["error"], "unknown log type")
s.buf.Reset()
s.handler.Clear()
// submit gzip-encoded request
var body bytes.Buffer
@ -274,9 +295,7 @@ func (s *integrationLoggerTestSuite) TestSubmitLog() {
require.NoError(t, gw.Close())
s.DoRawWithHeaders("POST", "/api/osquery/log", body.Bytes(), http.StatusOK, map[string]string{"Content-Encoding": "gzip"})
logString = s.buf.String()
assert.Equal(t, 1, strings.Count(logString, `"ip_addr"`))
assert.Equal(t, 1, strings.Count(logString, "x_for_ip_addr"))
assertIPAddrLogged(s.handler.Records())
// submit same payload without specifying gzip encoding fails
s.DoRawWithHeaders("POST", "/api/osquery/log", body.Bytes(), http.StatusBadRequest, nil)
@ -306,13 +325,13 @@ func (s *integrationLoggerTestSuite) TestEnrollOsqueryLogsErrors() {
s.DoRawNoAuth("POST", "/api/osquery/enroll", j, http.StatusUnauthorized)
parts := strings.Split(strings.TrimSpace(s.buf.String()), "\n")
require.Len(t, parts, 1)
logData := make(map[string]json.RawMessage)
require.NoError(t, json.Unmarshal([]byte(parts[0]), &logData))
assert.Equal(t, `"info"`, string(logData["level"]))
assert.Contains(t, string(logData["err"]), `"enroll failed:`)
assert.Contains(t, string(logData["err"]), `no matching secret found`)
records := s.handler.Records()
require.Len(t, records, 1)
assert.Equal(t, slog.LevelInfo, records[0].Level)
attrs := testutils.RecordAttrs(&records[0])
errStr := fmt.Sprint(attrs["err"])
assert.Contains(t, errStr, "enroll failed:")
assert.Contains(t, errStr, "no matching secret found")
}
func (s *integrationLoggerTestSuite) TestSetupExperienceEULAMetadataDoesNotLogErrorIfNotFound() {
@ -337,15 +356,12 @@ func (s *integrationLoggerTestSuite) TestSetupExperienceEULAMetadataDoesNotLogEr
s.token = getTestAdminToken(t, s.server)
s.Do("GET", "/api/v1/fleet/setup_experience/eula/metadata", nil, http.StatusNotFound)
logs := strings.Split(strings.TrimSpace(s.buf.String()), "\n")
require.Len(t, logs, 2) // Login and not found
records := s.handler.Records()
require.Len(t, records, 2) // Login and not found
logData := make(map[string]json.RawMessage)
log := logs[1]
assert.NoError(t, json.Unmarshal([]byte(log), &logData))
assert.Equal(t, `"info"`, string(logData["level"]))
assert.Equal(t, string(logData["err"]), `"not found"`)
assert.Equal(t, slog.LevelInfo, records[1].Level)
attrs := testutils.RecordAttrs(&records[1])
assert.Equal(t, "not found", fmt.Sprint(attrs["err"]))
}
func (s *integrationLoggerTestSuite) TestWindowsMDMEnrollEmptyBinarySecurityToken() {
@ -373,28 +389,22 @@ func (s *integrationLoggerTestSuite) TestWindowsMDMEnrollEmptyBinarySecurityToke
err = mdmDevice.Enroll()
require.Error(t, err)
t.Log(s.buf.String())
records := s.handler.Records()
var foundDiscovery, foundPolicy, foundEnroll bool
for line := range strings.SplitSeq(s.buf.String(), "\n") {
line = strings.TrimSpace(line)
if !strings.HasPrefix(line, "{") {
continue
}
for i := range records {
attrs := testutils.RecordAttrs(&records[i])
uri, _ := attrs["uri"].(string)
var m map[string]string
err := json.Unmarshal([]byte(line), &m)
require.NoError(t, err)
switch m["uri"] {
switch uri {
case microsoft_mdm.MDE2DiscoveryPath:
foundDiscovery = true
case microsoft_mdm.MDE2PolicyPath:
foundPolicy = true
require.Equal(t, "info", m["level"])
require.Equal(t, "binarySecurityToken is empty", m["soap_fault"])
require.Equal(t, slog.LevelInfo, records[i].Level)
require.Equal(t, "binarySecurityToken is empty", attrs["soap_fault"])
case microsoft_mdm.MDE2EnrollPath:
foundEnroll = false
foundEnroll = true
}
}
require.True(t, foundDiscovery)

View file

@ -27,8 +27,8 @@ import (
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/mdm/nanomdm/mdm"
"github.com/fleetdm/fleet/v4/server/mdm/nanomdm/push"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
micromdm "github.com/micromdm/micromdm/mdm/mdm"
@ -915,7 +915,7 @@ func (s *integrationMDMTestSuite) TestLifecycleSCEPCertExpiration() {
require.NoError(t, err)
fleetCfg := config.TestConfig()
config.SetTestMDMConfig(s.T(), &fleetCfg, cert, key, "")
logger := kitlog.NewJSONLogger(os.Stdout)
logger := logging.NewJSONLogger(os.Stdout)
// run without expired certs, no command enqueued
err = RenewSCEPCertificates(ctx, logger, s.ds, &fleetCfg, s.mdmCommander)

View file

@ -77,6 +77,7 @@ import (
nanomdm_pushsvc "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/push/service"
scepserver "github.com/fleetdm/fleet/v4/server/mdm/scep/server"
mdmtesting "github.com/fleetdm/fleet/v4/server/mdm/testing_utils"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/contract"
"github.com/fleetdm/fleet/v4/server/service/integrationtest/scep_server"
@ -86,7 +87,6 @@ import (
"github.com/fleetdm/fleet/v4/server/service/schedule"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/fleetdm/fleet/v4/server/worker"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
micromdm "github.com/micromdm/micromdm/mdm/mdm"
@ -125,7 +125,7 @@ type integrationMDMTestSuite struct {
// Flag to skip jobs processing by worker
skipWorkerJobs atomic.Bool
mdmCommander *apple_mdm.MDMAppleCommander
logger kitlog.Logger
logger *logging.Logger
scepChallenge string
appleVPPConfigSrv *httptest.Server
appleVPPConfigSrvConfig *appleVPPConfigSrvConf
@ -198,9 +198,9 @@ func (s *integrationMDMTestSuite) SetupSuite() {
scepStorage, err := s.ds.NewSCEPDepot()
require.NoError(s.T(), err)
pushLog := kitlog.NewJSONLogger(os.Stdout)
pushLog := logging.NewJSONLogger(os.Stdout)
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
pushLog = kitlog.NewNopLogger()
pushLog = logging.NewNopLogger()
}
pushFactory, pushProvider := newMockAPNSPushProviderFactory()
mdmPushService := nanomdm_pushsvc.New(
@ -213,9 +213,9 @@ func (s *integrationMDMTestSuite) SetupSuite() {
s.redisPool = redistest.SetupRedis(s.T(), "zz", false, false, false)
s.withServer.lq = live_query_mock.New(s.T())
wlog := kitlog.NewJSONLogger(os.Stdout)
wlog := logging.NewJSONLogger(os.Stdout)
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
wlog = kitlog.NewNopLogger()
wlog = logging.NewNopLogger()
}
activityModule := activities.NewActivityModule(s.ds, wlog)
@ -270,13 +270,13 @@ func (s *integrationMDMTestSuite) SetupSuite() {
var profileSchedule *schedule.Schedule
var cleanupsSchedule *schedule.Schedule
var androidProfileSchedule *schedule.Schedule
cronLog := kitlog.NewJSONLogger(os.Stdout)
cronLog := logging.NewJSONLogger(os.Stdout)
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
cronLog = kitlog.NewNopLogger()
cronLog = logging.NewNopLogger()
}
serverLogger := kitlog.NewJSONLogger(os.Stdout)
serverLogger := logging.NewJSONLogger(os.Stdout)
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
serverLogger = kitlog.NewNopLogger()
serverLogger = logging.NewNopLogger()
}
var softwareInstallerStore fleet.SoftwareInstallerStore
@ -6141,7 +6141,7 @@ func (s *integrationMDMTestSuite) TestSSO() {
}
err = detailQueries["mdm"].DirectIngestFunc(
context.Background(),
kitlog.NewNopLogger(),
logging.NewNopLogger(),
&fleet.Host{ID: hostResp.Host.ID},
s.ds,
rows,
@ -6172,7 +6172,7 @@ func (s *integrationMDMTestSuite) TestSSO() {
}
err = detailQueries["google_chrome_profiles"].DirectIngestFunc(
context.Background(),
kitlog.NewNopLogger(),
logging.NewNopLogger(),
&fleet.Host{ID: hostResp.Host.ID},
s.ds,
rows,
@ -6238,7 +6238,7 @@ func (s *integrationMDMTestSuite) TestSSO() {
// reporting google chrome profiles only clears chrome profiles from device mapping
err = detailQueries["google_chrome_profiles"].DirectIngestFunc(
context.Background(),
kitlog.NewNopLogger(),
logging.NewNopLogger(),
&fleet.Host{ID: hostResp.Host.ID},
s.ds,
[]map[string]string{},
@ -6485,7 +6485,7 @@ func (s *integrationMDMTestSuite) TestSSOWithSCIM() {
}
err = detailQueries["mdm"].DirectIngestFunc(
context.Background(),
kitlog.NewNopLogger(),
logging.NewNopLogger(),
&fleet.Host{ID: hostResp.Host.ID},
s.ds,
rows,
@ -6506,7 +6506,7 @@ func (s *integrationMDMTestSuite) TestSSOWithSCIM() {
}
err = detailQueries["google_chrome_profiles"].DirectIngestFunc(
context.Background(),
kitlog.NewNopLogger(),
logging.NewNopLogger(),
&fleet.Host{ID: hostResp.Host.ID},
s.ds,
rows,
@ -11576,7 +11576,7 @@ func (s *integrationMDMTestSuite) TestSilentMigrationGotchas() {
require.NoError(t, err)
fleetCfg := config.TestConfig()
config.SetTestMDMConfig(s.T(), &fleetCfg, cert, key, "")
logger := kitlog.NewJSONLogger(os.Stdout)
logger := logging.NewJSONLogger(os.Stdout)
err = RenewSCEPCertificates(ctx, logger, s.ds, &fleetCfg, s.mdmCommander)
require.NoError(t, err)

View file

@ -6,7 +6,7 @@ import (
"os"
"testing"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
@ -23,7 +23,7 @@ func (s *integrationSMTPTestSuite) SetupSuite() {
UseMailService: true,
}
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
opts.Logger = kitlog.NewNopLogger()
opts.Logger = logging.NewNopLogger()
}
users, server := RunServerForTestsWithDS(
s.T(),

View file

@ -23,9 +23,9 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/test"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -43,7 +43,7 @@ func (s *integrationSSOTestSuite) SetupSuite() {
pool := redistest.SetupRedis(s.T(), "zz", false, false, false)
opts := &TestServerOpts{Pool: pool, DBConns: s.dbConns}
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
opts.Logger = kitlog.NewNopLogger()
opts.Logger = logging.NewNopLogger()
}
users, server := RunServerForTestsWithDS(s.T(), s.ds, opts)
s.server = server

View file

@ -9,10 +9,10 @@ import (
android_mock "github.com/fleetdm/fleet/v4/server/mdm/android/mock"
android_service "github.com/fleetdm/fleet/v4/server/mdm/android/service"
"github.com/fleetdm/fleet/v4/server/platform/endpointer"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/service/integrationtest"
"github.com/fleetdm/fleet/v4/server/service/modules/activities"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
)
@ -23,7 +23,7 @@ type Suite struct {
func SetUpSuite(t *testing.T, uniqueTestName string) *Suite {
ds, redisPool, fleetCfg, fleetSvc, ctx := integrationtest.SetUpMySQLAndRedisAndService(t, uniqueTestName)
logger := log.NewLogfmtLogger(os.Stdout)
logger := logging.NewLogfmtLogger(os.Stdout)
proxy := android_mock.Client{}
proxy.InitCommonMocks()
activityModule := activities.NewActivityModule(ds, logger)

View file

@ -9,15 +9,15 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/platform/mysql/testing_utils"
"github.com/fleetdm/fleet/v4/server/service"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
)
type BaseSuite struct {
Logger log.Logger
Logger *logging.Logger
FleetCfg config.FleetConfig
Server *httptest.Server
DS *mysql.Datastore

View file

@ -319,7 +319,7 @@ func getHostIdentifier(logger log.Logger, identifierOption, providedIdentifier s
}
func (svc *Service) debugEnabledForHost(ctx context.Context, id uint) bool {
hlogger := log.With(svc.logger, "host-id", id)
hlogger := svc.logger.With("host-id", id)
ac, err := svc.ds.AppConfig(ctx)
if err != nil {
level.Debug(hlogger).Log("err", ctxerr.Wrap(ctx, err, "getting app config for host debug"))
@ -2031,7 +2031,7 @@ func (svc *Service) processSoftwareForNewlyFailingPolicies(
if err != nil {
return ctxerr.Wrap(ctx, err, "get software installer metadata by id")
}
logger := log.With(svc.logger,
logger := svc.logger.With(
"host_id", hostID,
"host_platform", hostPlatform,
"policy_id", failingPolicyWithInstaller.ID,
@ -2190,7 +2190,7 @@ func (svc *Service) processVPPForNewlyFailingPolicies(
for _, failingPolicyWithVPP := range failingPoliciesWithVPP {
policyID := failingPolicyWithVPP.ID
logger := log.With(svc.logger,
logger := svc.logger.With(
"host_id", hostID,
"host_platform", hostPlatform,
"policy_id", policyID,
@ -2349,7 +2349,7 @@ func (svc *Service) processScriptsForNewlyFailingPolicies(
if err != nil {
return ctxerr.Wrap(ctx, err, "get script metadata by id")
}
logger := log.With(svc.logger,
logger := svc.logger.With(
"host_id", hostID,
"host_platform", hostPlatform,
"policy_id", policyID,
@ -2559,7 +2559,7 @@ func (svc *Service) setHostConditionalAccessAsync(
compliant bool,
) {
go func() {
logger := log.With(svc.logger,
logger := svc.logger.With(
"msg", "set host conditional access",
"host_id", hostID,
"managed", managed,
@ -2589,7 +2589,7 @@ func (svc *Service) setHostConditionalAccess(
if err != nil {
return ctxerr.Wrap(ctx, err, "get integration")
}
logger := log.With(svc.logger,
logger := svc.logger.With(
"msg", "set compliance status",
"host_id", hostID,
"managed", managed,
@ -2665,7 +2665,7 @@ func (svc *Service) maybeDebugHost(
stats map[string]*fleet.Stats,
) {
if svc.debugEnabledForHost(ctx, host.ID) {
hlogger := log.With(svc.logger, "host-id", host.ID)
hlogger := svc.logger.With("host-id", host.ID)
logJSON(hlogger, host, "host")
logJSON(hlogger, results, "results")

View file

@ -30,13 +30,12 @@ import (
"github.com/fleetdm/fleet/v4/server/live_query/live_query_mock"
"github.com/fleetdm/fleet/v4/server/mock"
mockresult "github.com/fleetdm/fleet/v4/server/mock/mockresult"
platformlogging "github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/pubsub"
"github.com/fleetdm/fleet/v4/server/service/async"
"github.com/fleetdm/fleet/v4/server/service/osquery_utils"
"github.com/fleetdm/fleet/v4/server/service/redis_policy_set"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -558,7 +557,7 @@ func TestSubmitStatusLogs(t *testing.T) {
func TestSubmitResultLogsToLogDestination(t *testing.T) {
ds := new(mock.Store)
svc, ctx := newTestService(t, ds, nil, nil, &TestServerOpts{Logger: log.NewJSONLogger(os.Stdout)})
svc, ctx := newTestService(t, ds, nil, nil, &TestServerOpts{Logger: platformlogging.NewJSONLogger(os.Stdout)})
ds.AppConfigFunc = func(ctx context.Context) (*fleet.AppConfig, error) {
return &fleet.AppConfig{}, nil
@ -1205,7 +1204,7 @@ func TestHostDetailQueries(t *testing.T) {
svc := &Service{
clock: mockClock,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
config: config.TestConfig(),
ds: ds,
jitterMu: new(sync.Mutex),
@ -2248,7 +2247,7 @@ func TestMDMQueries(t *testing.T) {
ds := new(mock.Store)
svc := &Service{
clock: clock.NewMockClock(),
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
config: config.TestConfig(),
ds: ds,
jitterMu: new(sync.Mutex),
@ -2536,7 +2535,7 @@ func TestIngestDistributedQueryParseIdError(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2555,7 +2554,7 @@ func TestIngestDistributedQueryOrphanedCampaignLoadError(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2581,7 +2580,7 @@ func TestIngestDistributedQueryOrphanedCampaignWaitListener(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2614,7 +2613,7 @@ func TestIngestDistributedQueryOrphanedCloseError(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2650,7 +2649,7 @@ func TestIngestDistributedQueryOrphanedStopError(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2687,7 +2686,7 @@ func TestIngestDistributedQueryOrphanedStop(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2725,7 +2724,7 @@ func TestIngestDistributedQueryRecordCompletionError(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -2756,7 +2755,7 @@ func TestIngestDistributedQuery(t *testing.T) {
ds: ds,
resultStore: rs,
liveQueryStore: lq,
logger: log.NewNopLogger(),
logger: platformlogging.NewNopLogger(),
clock: mockClock,
}
@ -3032,7 +3031,7 @@ func TestGetHostIdentifier(t *testing.T) {
{identifierOption: "hostname", providedIdentifier: "foobar", details: details, expected: "foohost"},
{identifierOption: "provided", providedIdentifier: "foobar", details: details, expected: "foobar"},
}
logger := log.NewNopLogger()
logger := platformlogging.NewNopLogger()
for _, tt := range testCases {
t.Run("", func(t *testing.T) {
@ -3055,8 +3054,7 @@ func TestGetHostIdentifier(t *testing.T) {
func TestDistributedQueriesLogsManyErrors(t *testing.T) {
buf := new(bytes.Buffer)
logger := log.NewJSONLogger(buf)
logger = level.NewFilter(logger, level.AllowDebug())
logger := platformlogging.NewJSONLogger(buf)
ds := new(mock.Store)
svc, ctx := newTestService(t, ds, nil, nil)
@ -3871,7 +3869,7 @@ func TestLiveQueriesFailing(t *testing.T) {
lq := live_query_mock.New(t)
cfg := config.TestConfig()
buf := new(bytes.Buffer)
logger := log.NewLogfmtLogger(buf)
logger := platformlogging.NewLogfmtLogger(buf)
svc, ctx := newTestServiceWithConfig(t, ds, cfg, nil, lq, &TestServerOpts{
Logger: logger,
})
@ -4543,7 +4541,7 @@ func TestPreProcessSoftwareResults(t *testing.T) {
host = tc.host
}
// mutates tc.resultsIn
preProcessSoftwareResults(host, tc.resultsIn, tc.statusesIn, tc.messagesIn, tc.overrides, log.NewNopLogger())
preProcessSoftwareResults(host, tc.resultsIn, tc.statusesIn, tc.messagesIn, tc.overrides, platformlogging.NewNopLogger())
require.Equal(t, tc.resultsExpected, tc.resultsIn)
})
}
@ -4626,7 +4624,7 @@ func BenchmarkPreprocessUbuntuPythonPackageFilter(b *testing.B) {
}
for i := 0; i < b.N; i++ {
preProcessSoftwareResults(&fleet.Host{ID: 1, Platform: platform}, results, statuses, nil, nil, log.NewNopLogger())
preProcessSoftwareResults(&fleet.Host{ID: 1, Platform: platform}, results, statuses, nil, nil, platformlogging.NewNopLogger())
}
}

View file

@ -14,7 +14,7 @@ import (
"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
@ -33,7 +33,7 @@ type Schedule struct {
ctx context.Context
name string
instanceID string
logger log.Logger
logger *logging.Logger
defaultPrevRunCreatedAt time.Time // default timestamp of previous run for the schedule if none exists, time.Now if not set
@ -92,9 +92,9 @@ type CronStatsStore interface {
type Option func(*Schedule)
// WithLogger sets a logger for the Schedule.
func WithLogger(l log.Logger) Option {
func WithLogger(l *logging.Logger) Option {
return func(s *Schedule) {
s.logger = log.With(l, "schedule", s.name)
s.logger = l.With("schedule", s.name)
}
}
@ -168,7 +168,7 @@ func New(
ctx: ctx,
name: name,
instanceID: instanceID,
logger: log.NewNopLogger(),
logger: logging.NewNopLogger(),
trigger: make(chan struct{}),
done: make(chan struct{}),
configReloadInterval: 1 * time.Hour, // by default we will check for updated config once per hour
@ -180,9 +180,9 @@ func New(
fn(sch)
}
if sch.logger == nil {
sch.logger = log.NewNopLogger()
sch.logger = logging.NewNopLogger()
}
sch.logger = log.With(sch.logger, "instanceID", instanceID)
sch.logger = sch.logger.With("instanceID", instanceID)
sch.errors = make(fleet.CronScheduleErrors)
return sch
}

View file

@ -19,10 +19,10 @@ import (
nanodep_storage "github.com/fleetdm/fleet/v4/server/mdm/nanodep/storage"
nanomdm_push "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/push"
nanomdm_storage "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/storage"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service/async"
"github.com/fleetdm/fleet/v4/server/service/conditional_access_microsoft_proxy"
"github.com/fleetdm/fleet/v4/server/sso"
kitlog "github.com/go-kit/log"
)
var _ fleet.Service = (*Service)(nil)
@ -34,7 +34,7 @@ type Service struct {
carveStore fleet.CarveStore
resultStore fleet.QueryResultStore
liveQueryStore fleet.LiveQueryStore
logger kitlog.Logger
logger *logging.Logger
config config.FleetConfig
clock clock.Clock
@ -124,7 +124,7 @@ func NewService(
ds fleet.Datastore,
task *async.Task,
resultStore fleet.QueryResultStore,
logger kitlog.Logger,
logger *logging.Logger,
osqueryLogger *OsqueryLogger,
config config.FleetConfig,
mailService fleet.MailService,

View file

@ -55,7 +55,7 @@ type statsTracker struct {
func (svc Service) StreamCampaignResults(ctx context.Context, conn *websocket.Conn, campaignID uint) {
logging.WithExtras(ctx, "campaign_id", campaignID)
logger := log.With(svc.logger, "campaignID", campaignID)
logger := svc.logger.With("campaignID", campaignID)
// Explicitly set ObserverCanRun: true in this check because we check that the user trying to
// read results is the same user that initiated the query. This means the observer check already

View file

@ -25,13 +25,13 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/redis/redistest"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/live_query/live_query_mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
common_mysql "github.com/fleetdm/fleet/v4/server/platform/mysql"
"github.com/fleetdm/fleet/v4/server/pubsub"
"github.com/fleetdm/fleet/v4/server/service/contract"
"github.com/fleetdm/fleet/v4/server/test"
fleet_httptest "github.com/fleetdm/fleet/v4/server/test/httptest"
"github.com/ghodss/yaml"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -98,7 +98,7 @@ func (ts *withServer) SetupSuite(dbName string) {
DBConns: ts.dbConns,
}
if os.Getenv("FLEET_INTEGRATION_TESTS_DISABLE_LOG") != "" {
opts.Logger = kitlog.NewNopLogger()
opts.Logger = logging.NewNopLogger()
}
users, server := RunServerForTestsWithDS(ts.s.T(), ts.ds, opts)
ts.server = server

View file

@ -49,6 +49,7 @@ import (
scep_depot "github.com/fleetdm/fleet/v4/server/mdm/scep/depot"
nanodep_mock "github.com/fleetdm/fleet/v4/server/mock/nanodep"
"github.com/fleetdm/fleet/v4/server/platform/endpointer"
platformlogging "github.com/fleetdm/fleet/v4/server/platform/logging"
common_mysql "github.com/fleetdm/fleet/v4/server/platform/mysql"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/service/async"
@ -59,7 +60,6 @@ import (
"github.com/fleetdm/fleet/v4/server/sso"
"github.com/fleetdm/fleet/v4/server/test"
"github.com/go-kit/kit/endpoint"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -74,11 +74,12 @@ func newTestService(t *testing.T, ds fleet.Datastore, rs fleet.QueryResultStore,
func newTestServiceWithConfig(t *testing.T, ds fleet.Datastore, fleetConfig config.FleetConfig, rs fleet.QueryResultStore, lq fleet.LiveQueryStore, opts ...*TestServerOpts) (fleet.Service, context.Context) {
lic := &fleet.LicenseInfo{Tier: fleet.TierFree}
writer, err := logging.NewFilesystemLogWriter(fleetConfig.Filesystem.StatusLogFile, kitlog.NewNopLogger(), fleetConfig.Filesystem.EnableLogRotation, fleetConfig.Filesystem.EnableLogCompression, 500, 28, 3)
logger := platformlogging.NewNopLogger()
writer, err := logging.NewFilesystemLogWriter(fleetConfig.Filesystem.StatusLogFile, logger, fleetConfig.Filesystem.EnableLogRotation,
fleetConfig.Filesystem.EnableLogCompression, 500, 28, 3)
require.NoError(t, err)
osqlogger := &OsqueryLogger{Status: writer, Result: writer}
logger := kitlog.NewNopLogger()
var (
failingPolicySet fleet.FailingPolicySet = NewMemFailingPolicySet()
@ -382,7 +383,7 @@ type ConditionalAccess struct {
}
type TestServerOpts struct {
Logger kitlog.Logger
Logger *platformlogging.Logger
License *fleet.LicenseInfo
SkipCreateTestUsers bool
Rs fleet.QueryResultStore
@ -448,7 +449,7 @@ func RunServerForTestsWithServiceWithDS(t *testing.T, ctx context.Context, ds fl
if len(opts) == 0 || (len(opts) > 0 && !opts[0].SkipCreateTestUsers) {
users = createTestUsers(t, ds)
}
logger := kitlog.NewLogfmtLogger(os.Stdout)
logger := platformlogging.NewLogfmtLogger(os.Stdout)
if len(opts) > 0 && opts[0].Logger != nil {
logger = opts[0].Logger
}
@ -558,7 +559,8 @@ func RunServerForTestsWithServiceWithDS(t *testing.T, ctx context.Context, ds fl
if len(opts) > 0 && opts[0].HostIdentity != nil {
require.NoError(t, hostidentity.RegisterSCEP(rootMux, opts[0].HostIdentity.SCEPStorage, ds, logger, &cfg))
var httpSigVerifier func(http.Handler) http.Handler
httpSigVerifier, err := httpsig.Middleware(ds, opts[0].HostIdentity.RequireHTTPMessageSignature, kitlog.With(logger, "component", "http-sig-verifier"))
httpSigVerifier, err := httpsig.Middleware(ds, opts[0].HostIdentity.RequireHTTPMessageSignature,
logger.With("component", "http-sig-verifier"))
require.NoError(t, err)
extra = append(extra, WithHTTPSigVerifier(httpSigVerifier))
}

View file

@ -16,9 +16,9 @@ import (
apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
nanomdm_push "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/push"
mock "github.com/fleetdm/fleet/v4/server/mock/mdm"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
"github.com/fleetdm/fleet/v4/server/test"
kitlog "github.com/go-kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
@ -98,9 +98,9 @@ func TestAppleMDM(t *testing.T) {
mdmStorage, err := ds.NewMDMAppleMDMStorage()
require.NoError(t, err)
// nopLog := kitlog.NewNopLogger()
// nopLog := logging.NewNopLogger()
// use this to debug/verify details of calls
nopLog := kitlog.NewJSONLogger(os.Stdout)
nopLog := logging.NewJSONLogger(os.Stdout)
testOrgName := "fleet-test"
@ -1425,7 +1425,7 @@ func TestGetSignedURL(t *testing.T) {
var data []byte
buf := bytes.NewBuffer(data)
logger := kitlog.NewLogfmtLogger(buf)
logger := logging.NewLogfmtLogger(buf)
a := &AppleMDM{Log: logger}
// S3 not configured

View file

@ -9,8 +9,8 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/test"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -23,9 +23,9 @@ func TestDBMigrationsVPPToken(t *testing.T) {
// call TruncateTables immediately as a DB migration may have created jobs
mysql.TruncateTables(t, ds)
nopLog := kitlog.NewNopLogger()
nopLog := logging.NewNopLogger()
// use this to debug/verify details of calls
// nopLog := kitlog.NewJSONLogger(os.Stdout)
// nopLog := logging.NewJSONLogger(os.Stdout)
// create and register the worker
processor := &DBMigration{

View file

@ -17,8 +17,8 @@ import (
apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
nanodep_client "github.com/fleetdm/fleet/v4/server/mdm/nanodep/client"
"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
kitlog "github.com/go-kit/log"
"github.com/stretchr/testify/require"
)
@ -73,7 +73,7 @@ func TestMacosSetupAssistant(t *testing.T) {
err = ds.AddHostsToTeam(ctx, fleet.NewAddHostsToTeamParams(&tm2.ID, []uint{hosts[4].ID, hosts[5].ID}))
require.NoError(t, err)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
depStorage, err := ds.NewMDMAppleDEPStorage()
require.NoError(t, err)
macosJob := &MacosSetupAssistant{

View file

@ -8,7 +8,7 @@ import (
"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
"github.com/fleetdm/fleet/v4/server/fleet"
kitlog "github.com/go-kit/log"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/go-kit/log/level"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
@ -62,7 +62,7 @@ type vulnArgs struct {
// Worker runs jobs. NOT SAFE FOR CONCURRENT USE.
type Worker struct {
ds fleet.Datastore
log kitlog.Logger
log *logging.Logger
// For tests only, allows ignoring unknown jobs instead of failing them.
TestIgnoreUnknownJobs bool
@ -74,7 +74,7 @@ type Worker struct {
registry map[string]Job
}
func NewWorker(ds fleet.Datastore, log kitlog.Logger) *Worker {
func NewWorker(ds fleet.Datastore, log *logging.Logger) *Worker {
return &Worker{
ds: ds,
log: log,
@ -172,7 +172,7 @@ func (w *Worker) ProcessJobs(ctx context.Context) error {
default:
}
log := kitlog.With(w.log, "job_id", job.ID)
log := w.log.With("job_id", job.ID)
if _, ok := seen[job.ID]; ok {
level.Debug(log).Log("msg", "some jobs failed, retrying on next cron execution")

View file

@ -10,8 +10,8 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/fleet"
"github.com/fleetdm/fleet/v4/server/mock"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/ptr"
kitlog "github.com/go-kit/log"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
"github.com/tj/assert"
@ -56,7 +56,7 @@ func TestWorker(t *testing.T) {
return job, nil
}
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
w := NewWorker(ds, logger)
// register a test job
@ -112,7 +112,7 @@ func TestWorkerRetries(t *testing.T) {
return job, nil
}
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
w := NewWorker(ds, logger)
// register a test job
@ -188,7 +188,7 @@ func TestWorkerMiddleJobFails(t *testing.T) {
return job, nil
}
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
w := NewWorker(ds, logger)
// register a test job
@ -245,7 +245,7 @@ func TestWorkerWithRealDatastore(t *testing.T) {
// call TruncateTables immediately, because a DB migration may create jobs
mysql.TruncateTables(t, ds)
logger := kitlog.NewNopLogger()
logger := logging.NewNopLogger()
w := NewWorker(ds, logger)
w.delayPerRetry = []time.Duration{
1: 0,

View file

@ -27,8 +27,8 @@ import (
"github.com/fleetdm/fleet/v4/server/datastore/mysql"
"github.com/fleetdm/fleet/v4/server/mdm/nanomdm/push/buford"
nanomdm_pushsvc "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/push/service"
"github.com/fleetdm/fleet/v4/server/platform/logging"
"github.com/fleetdm/fleet/v4/server/service"
kitlog "github.com/go-kit/log"
)
func main() {
@ -63,7 +63,7 @@ func main() {
MaxIdleConns: 50,
ConnMaxLifetime: 0,
}
logger := kitlog.NewLogfmtLogger(os.Stderr)
logger := logging.NewLogfmtLogger(os.Stderr)
opts := []mysql.DBOption{
mysql.Logger(logger),
@ -87,7 +87,7 @@ func main() {
})), nil
}))
nanoMDMLogger := service.NewNanoMDMLogger(kitlog.With(logger, "component", "apple-mdm-push"))
nanoMDMLogger := service.NewNanoMDMLogger(logger.With("component", "apple-mdm-push"))
pusher := nanomdm_pushsvc.New(mdmStorage, mdmStorage, pushProviderFactory, nanoMDMLogger)
res, err := pusher.Push(context.Background(), hostUUIDs)
if err != nil {