Migrating server/worker and related code to slog (#40205)
**Related issue:** Resolves #40054

# Checklist for submitter

- [x] Changes file added for user-visible changes in `changes/`, `orbit/changes/` or `ee/fleetd-chrome/changes`.

## Testing

- [x] Added/updated automated tests
- [x] QA'd all new/changed functionality manually

## Summary by CodeRabbit (release notes)

* **Refactor**
  * Updated logging infrastructure across background jobs and worker services to use standardized structured logging, improving consistency and log output formatting across the system.
Parent: 3d81a21ace
Commit: 763fbf318d
31 changed files with 257 additions and 268 deletions
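The change repeated throughout this diff is mechanical: call sites that used to hand a go-kit `kitlog.Logger` to the worker package now pass the standard library's `*slog.Logger`, obtained from a `SlogLogger()` accessor on the repo's `logging.Logger` wrapper. A minimal, self-contained sketch of that shape (the wrapper internals below are assumed for illustration; only the `SlogLogger()` name comes from the diff):

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

// Logger is a hypothetical stand-in for the repo's logging.Logger wrapper:
// it owns a *slog.Logger and exposes it to code migrated to slog.
type Logger struct {
	sl *slog.Logger
}

// SlogLogger mirrors the logger.SlogLogger() accessor used across the diff.
func (l *Logger) SlogLogger() *slog.Logger { return l.sl }

func main() {
	logger := &Logger{sl: slog.New(slog.NewJSONHandler(os.Stdout, nil))}

	// go-kit style (before): level.Info(kitLogger).Log("msg", "queued job", "job_id", 1)
	// slog style (after): leveled, context-aware, message first, then key/value attrs.
	logger.SlogLogger().InfoContext(context.Background(), "queued job", "job_id", 1)
}
```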
@@ -261,7 +261,7 @@ func scanVulnerabilities(
     if err := worker.QueueJiraVulnJobs(
         automationCtx,
         ds,
-        logger.With("jira", "vulnerabilities"),
+        logger.SlogLogger().With("jira", "vulnerabilities"),
         recentV,
         matchingMeta,
     ); err != nil {

@@ -273,7 +273,7 @@ func scanVulnerabilities(
     if err := worker.QueueZendeskVulnJobs(
         automationCtx,
         ds,
-        logger.With("zendesk", "vulnerabilities"),
+        logger.SlogLogger().With("zendesk", "vulnerabilities"),
         recentV,
         matchingMeta,
     ); err != nil {

@@ -692,7 +692,7 @@ func triggerFailingPoliciesAutomation(
     if err != nil {
         return ctxerr.Wrapf(ctx, err, "listing hosts for failing policies set %d", policy.ID)
     }
-    if err := worker.QueueJiraFailingPolicyJob(ctx, ds, logger, policy, hosts); err != nil {
+    if err := worker.QueueJiraFailingPolicyJob(ctx, ds, logger.SlogLogger(), policy, hosts); err != nil {
         return err
     }
     if err := failingPoliciesSet.RemoveHosts(policy.ID, hosts); err != nil {

@@ -704,7 +704,7 @@ func triggerFailingPoliciesAutomation(
     if err != nil {
         return ctxerr.Wrapf(ctx, err, "listing hosts for failing policies set %d", policy.ID)
     }
-    if err := worker.QueueZendeskFailingPolicyJob(ctx, ds, logger, policy, hosts); err != nil {
+    if err := worker.QueueZendeskFailingPolicyJob(ctx, ds, logger.SlogLogger(), policy, hosts); err != nil {
        return err
     }
     if err := failingPoliciesSet.RemoveHosts(policy.ID, hosts); err != nil {

@@ -747,17 +747,17 @@ func newWorkerIntegrationsSchedule(
     // create the worker and register the Jira and Zendesk jobs even if no
     // integration is enabled, as that config can change live (and if it's not
     // there won't be any records to process so it will mostly just sleep).
-    w := worker.NewWorker(ds, logger)
+    w := worker.NewWorker(ds, logger.SlogLogger())
     // leave the url empty for now, will be filled when the lock is acquired with
     // the up-to-date config.
     jira := &worker.Jira{
         Datastore:     ds,
-        Log:           logger,
+        Log:           logger.SlogLogger(),
         NewClientFunc: newJiraClient,
     }
     zendesk := &worker.Zendesk{
         Datastore:     ds,
-        Log:           logger,
+        Log:           logger.SlogLogger(),
         NewClientFunc: newZendeskClient,
     }
     var (

@@ -773,29 +773,29 @@ func newWorkerIntegrationsSchedule(
     }
     macosSetupAsst := &worker.MacosSetupAssistant{
         Datastore:  ds,
-        Log:        logger,
+        Log:        logger.SlogLogger(),
         DEPService: depSvc,
         DEPClient:  depCli,
     }
     appleMDM := &worker.AppleMDM{
         Datastore:             ds,
-        Log:                   logger,
+        Log:                   logger.SlogLogger(),
         Commander:             commander,
         BootstrapPackageStore: bootstrapPackageStore,
         VPPInstaller:          vppInstaller,
     }
     vppVerify := &worker.AppleSoftware{
         Datastore: ds,
-        Log:       logger,
+        Log:       logger.SlogLogger(),
         Commander: commander,
     }
     dbMigrate := &worker.DBMigration{
         Datastore: ds,
-        Log:       logger,
+        Log:       logger.SlogLogger(),
     }
     softwareWorker := &worker.SoftwareWorker{
         Datastore:     ds,
-        Log:           logger,
+        Log:           logger.SlogLogger(),
         AndroidModule: androidModule,
     }
     w.Register(jira, zendesk, macosSetupAsst, appleMDM, dbMigrate, vppVerify, softwareWorker)

@@ -839,7 +839,7 @@ func newWorkerIntegrationsSchedule(
             return nil
         }),
         schedule.WithJob("dep_cooldowns", func(ctx context.Context) error {
-            return worker.ProcessDEPCooldowns(ctx, ds, logger)
+            return worker.ProcessDEPCooldowns(ctx, ds, logger.SlogLogger())
         }),
     )

@@ -1870,11 +1870,11 @@ func newBatchActivitiesSchedule(

     logger = logger.With("cron", name)

-    w := worker.NewWorker(ds, logger)
+    w := worker.NewWorker(ds, logger.SlogLogger())

     scriptsJob := &worker.BatchScripts{
         Datastore: ds,
-        Log:       logger,
+        Log:       logger.SlogLogger(),
     }

     w.Register(scriptsJob)
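Note how the cron wiring above derives scoped loggers with `With(...)` before queueing jobs; with slog this returns a child logger whose attributes ride along on every record. A standalone sketch of that behavior:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	base := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// With returns a child logger; every record it emits carries the
	// jira=vulnerabilities attribute, as in the QueueJiraVulnJobs call above.
	vulnLog := base.With("jira", "vulnerabilities")
	vulnLog.Info("queued vuln job", "job_id", 42)
}
```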
@@ -265,7 +265,7 @@ func (svc *Service) updateAppConfigMDMAppleSetup(ctx context.Context, payload fl
 }

 func (svc *Service) updateMacOSSetupEnableEndUserAuth(ctx context.Context, enable bool, teamID *uint, teamName *string) error {
-    if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger, worker.MacosSetupAssistantUpdateProfile, teamID); err != nil {
+    if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger.SlogLogger(), worker.MacosSetupAssistantUpdateProfile, teamID); err != nil {
         return ctxerr.Wrap(ctx, err, "queue macos setup assistant update profile job")
     }

@@ -657,7 +657,7 @@ func (svc *Service) SetOrUpdateMDMAppleSetupAssistant(ctx context.Context, asst
     if _, err := worker.QueueMacosSetupAssistantJob(
         ctx,
         svc.ds,
-        svc.logger,
+        svc.logger.SlogLogger(),
         worker.MacosSetupAssistantProfileChanged,
         newAsst.TeamID); err != nil {
         return nil, ctxerr.Wrap(ctx, err, "enqueue macos setup assistant profile changed job")

@@ -702,7 +702,7 @@ func (svc *Service) DeleteMDMAppleSetupAssistant(ctx context.Context, teamID *ui
     if _, err := worker.QueueMacosSetupAssistantJob(
         ctx,
         svc.ds,
-        svc.logger,
+        svc.logger.SlogLogger(),
         worker.MacosSetupAssistantProfileDeleted,
         teamID); err != nil {
         return ctxerr.Wrap(ctx, err, "enqueue macos setup assistant profile deleted job")

@@ -988,7 +988,7 @@ func (svc *Service) mdmSSOHandleCallbackAuth(
 }

 func (svc *Service) mdmAppleSyncDEPProfiles(ctx context.Context) error {
-    if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger, worker.MacosSetupAssistantUpdateAllProfiles, nil); err != nil {
+    if _, err := worker.QueueMacosSetupAssistantJob(ctx, svc.ds, svc.logger.SlogLogger(), worker.MacosSetupAssistantUpdateAllProfiles, nil); err != nil {
         return ctxerr.Wrap(ctx, err, "queue macos setup assistant update all profiles job")
     }
     return nil
@@ -826,7 +826,7 @@ func (svc *Service) deleteVPPApp(ctx context.Context, teamID *uint, meta *fleet.
     if err != nil {
         return &fleet.BadRequestError{Message: "Android MDM is not enabled", InternalErr: err}
     }
-    err = worker.QueueMakeAndroidAppUnavailableJob(ctx, svc.ds, svc.logger, meta.VPPAppID.AdamID, androidHostsUUIDToPolicyID, enterprise.Name())
+    err = worker.QueueMakeAndroidAppUnavailableJob(ctx, svc.ds, svc.logger.SlogLogger(), meta.VPPAppID.AdamID, androidHostsUUIDToPolicyID, enterprise.Name())
     if err != nil {
         return ctxerr.Wrap(ctx, err, "enqueuing job to make android app unavailable")
     }

@@ -740,7 +740,7 @@ func (svc *Service) DeleteTeam(ctx context.Context, teamID uint) error {
     if _, err := worker.QueueMacosSetupAssistantJob(
         ctx,
         svc.ds,
-        svc.logger,
+        svc.logger.SlogLogger(),
         worker.MacosSetupAssistantTeamDeleted,
         nil,
         mdmHostSerials...); err != nil {

@@ -440,7 +440,7 @@ func (svc *Service) BatchAssociateVPPApps(ctx context.Context, teamName string,

     if len(androidHostPoliciesToUpdate) > 0 && enterprise != nil {
         for hostUUID, policyID := range androidHostPoliciesToUpdate {
-            err := worker.QueueBulkSetAndroidAppsAvailableForHost(ctx, svc.ds, svc.logger, hostUUID, policyID, appIDs, enterprise.Name())
+            err := worker.QueueBulkSetAndroidAppsAvailableForHost(ctx, svc.ds, svc.logger.SlogLogger(), hostUUID, policyID, appIDs, enterprise.Name())
             if err != nil {
                 return nil, ctxerr.WrapWithData(
                     ctx,

@@ -737,7 +737,7 @@ func (svc *Service) AddAppStoreApp(ctx context.Context, teamID *uint, appID flee
         return 0, ctxerr.Wrap(ctx, err, "writing VPP app to db")
     }
     if appID.Platform == fleet.AndroidPlatform {
-        err := worker.QueueMakeAndroidAppAvailableJob(ctx, svc.ds, svc.logger, appID.AdamID, addedApp.AppTeamID, androidEnterpriseName, androidConfigChanged)
+        err := worker.QueueMakeAndroidAppAvailableJob(ctx, svc.ds, svc.logger.SlogLogger(), appID.AdamID, addedApp.AppTeamID, androidEnterpriseName, androidConfigChanged)
         if err != nil {
             return 0, ctxerr.Wrap(ctx, err, "enqueuing job to make android app available")
         }

@@ -1020,7 +1020,7 @@ func (svc *Service) UpdateAppStoreApp(ctx context.Context, titleID uint, teamID
     if err != nil {
         return nil, nil, &fleet.BadRequestError{Message: "Android MDM is not enabled", InternalErr: err}
     }
-    err = worker.QueueMakeAndroidAppAvailableJob(ctx, svc.ds, svc.logger, appToWrite.AdamID, insertedApp.AppTeamID, enterprise.Name(), androidConfigChanged)
+    err = worker.QueueMakeAndroidAppAvailableJob(ctx, svc.ds, svc.logger.SlogLogger(), appToWrite.AdamID, insertedApp.AppTeamID, enterprise.Name(), androidConfigChanged)
     if err != nil {
         return nil, nil, ctxerr.Wrap(ctx, err, "enqueuing job to make android app available")
     }
@@ -7,6 +7,7 @@ import (
     "github.com/fleetdm/fleet/v4/server/contexts/license"
     "github.com/fleetdm/fleet/v4/server/fleet"
     "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/mdm"
+    "github.com/fleetdm/fleet/v4/server/platform/logging"
     "github.com/fleetdm/fleet/v4/server/ptr"
     "github.com/fleetdm/fleet/v4/server/worker"
     kitlog "github.com/go-kit/log"

@@ -52,7 +53,7 @@ type HostOptions struct {
 // HostLifecycle manages MDM host lifecycle actions
 type HostLifecycle struct {
     ds              fleet.Datastore
-    logger          kitlog.Logger
+    logger          *logging.Logger
     newActivityFunc NewActivityFunc
 }

@@ -62,7 +63,7 @@ type HostLifecycle struct {
 type NewActivityFunc func(ctx context.Context, user *fleet.User, details fleet.ActivityDetails, ds fleet.Datastore, logger kitlog.Logger) error

 // New creates a new HostLifecycle struct
-func New(ds fleet.Datastore, logger kitlog.Logger, newActivityFn NewActivityFunc) *HostLifecycle {
+func New(ds fleet.Datastore, logger *logging.Logger, newActivityFn NewActivityFunc) *HostLifecycle {
     return &HostLifecycle{
         ds:              ds,
         logger:          logger,

@@ -245,7 +246,7 @@ func (t *HostLifecycle) turnOnApple(ctx context.Context, opts HostOptions) error
     err := worker.QueueAppleMDMJob(
         ctx,
         t.ds,
-        t.logger,
+        t.logger.SlogLogger(),
         worker.AppleMDMPostDEPEnrollmentTask,
         opts.UUID,
         opts.Platform,

@@ -263,7 +264,7 @@ func (t *HostLifecycle) turnOnApple(ctx context.Context, opts HostOptions) error
     if err := worker.QueueAppleMDMJob(
         ctx,
         t.ds,
-        t.logger,
+        t.logger.SlogLogger(),
         worker.AppleMDMPostManualEnrollmentTask,
         opts.UUID,
         opts.Platform,

@@ -329,7 +330,7 @@ func (t *HostLifecycle) restorePendingDEPHost(ctx context.Context, host *fleet.H
         return ctxerr.Wrap(ctx, err, "restore pending dep host")
     }

-    if _, err := worker.QueueMacosSetupAssistantJob(ctx, t.ds, t.logger,
+    if _, err := worker.QueueMacosSetupAssistantJob(ctx, t.ds, t.logger.SlogLogger(),
         worker.MacosSetupAssistantHostsTransferred, tmID, host.HardwareSerial); err != nil {
         return ctxerr.Wrap(ctx, err, "queue macos setup assistant update profile job")
     }
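`HostLifecycle` keeps the `*logging.Logger` wrapper rather than a bare `*slog.Logger`, presumably so one injected dependency can serve both migrated and not-yet-migrated call sites (note that `NewActivityFunc` above still takes a `kitlog.Logger`). A speculative sketch of that dual-facing shape; the `Log` method and wrapper internals are assumptions for illustration, not taken from the diff:

```go
package main

import (
	"log/slog"
	"os"
)

// Logger approximates the server/platform/logging wrapper (internals assumed).
type Logger struct{ sl *slog.Logger }

// SlogLogger feeds migrated callers, e.g. worker.QueueAppleMDMJob above.
func (l *Logger) SlogLogger() *slog.Logger { return l.sl }

// Log mimics the go-kit Logger interface for legacy callers (assumed shape).
func (l *Logger) Log(keyvals ...any) error {
	l.sl.Info("legacy go-kit call", keyvals...)
	return nil
}

func main() {
	lg := &Logger{sl: slog.New(slog.NewTextHandler(os.Stdout, nil))}
	lg.Log("msg", "old-style call")                     // kitlog-shaped caller
	lg.SlogLogger().Info("new-style call", "job_id", 7) // migrated caller
}
```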
@@ -6,6 +6,7 @@ import (

     "github.com/fleetdm/fleet/v4/server/fleet"
     "github.com/fleetdm/fleet/v4/server/mock"
+    "github.com/fleetdm/fleet/v4/server/platform/logging"
     kitlog "github.com/go-kit/log"
     "github.com/stretchr/testify/require"
 )

@@ -16,7 +17,7 @@ func nopNewActivity(ctx context.Context, user *fleet.User, details fleet.Activit

 func TestDoUnsupportedParams(t *testing.T) {
     ds := new(mock.Store)
-    lc := New(ds, kitlog.NewNopLogger(), nopNewActivity)
+    lc := New(ds, logging.NewNopLogger(), nopNewActivity)

     err := lc.Do(context.Background(), HostOptions{})
     require.ErrorContains(t, err, "unsupported platform")

@@ -33,7 +34,7 @@ func TestDoUnsupportedParams(t *testing.T) {

 func TestDoParamValidation(t *testing.T) {
     ds := new(mock.Store)
-    lf := New(ds, kitlog.NewNopLogger(), nopNewActivity)
+    lf := New(ds, logging.NewNopLogger(), nopNewActivity)
     ctx := context.Background()

     cases := []struct {
@@ -10,8 +10,8 @@ import (
     "github.com/fleetdm/fleet/v4/server/fleet"
     apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
     "github.com/fleetdm/fleet/v4/server/mdm/nanomdm/mdm"
+    "github.com/fleetdm/fleet/v4/server/platform/logging"
     "github.com/fleetdm/fleet/v4/server/worker"
-    kitlog "github.com/go-kit/log"
     "github.com/go-kit/log/level"
     "github.com/micromdm/plist"
 )

@@ -63,7 +63,7 @@ func NewInstalledApplicationListResult(ctx context.Context, rawResult []byte, uu
 func NewInstalledApplicationListResultsHandler(
     ds fleet.Datastore,
     commander *apple_mdm.MDMAppleCommander,
-    logger kitlog.Logger,
+    logger *logging.Logger,
     verifyTimeout, verifyRequestDelay time.Duration,
 ) fleet.MDMCommandResultsHandler {
     return func(ctx context.Context, commandResults fleet.MDMCommandResults) error {

@@ -250,7 +250,7 @@ func NewInstalledApplicationListResultsHandler(
         // Queue a job to verify the VPP install.
         return ctxerr.Wrap(
             ctx,
-            worker.QueueVPPInstallVerificationJob(ctx, ds, logger, verifyRequestDelay,
+            worker.QueueVPPInstallVerificationJob(ctx, ds, logger.SlogLogger(), verifyRequestDelay,
                 installedAppResult.HostUUID(), installedAppResult.UUID(), requireXcodeSpecialCase),
             "InstalledApplicationList handler: queueing vpp install verification job",
         )

@@ -332,7 +332,7 @@ func NewDeviceLocationResult(result *mdm.CommandResults, hostID uint) (DeviceLoc
 func NewDeviceLocationResultsHandler(
     ds fleet.Datastore,
     commander *apple_mdm.MDMAppleCommander,
-    logger kitlog.Logger,
+    logger *logging.Logger,
 ) fleet.MDMCommandResultsHandler {
     return func(ctx context.Context, commandResults fleet.MDMCommandResults) error {
         deviceLocResult, ok := commandResults.(DeviceLocationResult)
@@ -1194,7 +1194,7 @@ func (svc *Service) AddHostsToTeam(ctx context.Context, teamID *uint, hostIDs []
     if _, err := worker.QueueMacosSetupAssistantJob(
         ctx,
         svc.ds,
-        svc.logger,
+        svc.logger.SlogLogger(),
         worker.MacosSetupAssistantHostsTransferred,
         teamID,
         serials...); err != nil {

@@ -1214,7 +1214,7 @@ func (svc *Service) AddHostsToTeam(ctx context.Context, teamID *uint, hostIDs []
             return ctxerr.Wrap(ctx, err, "get android enterprise")
         }

-        if err := worker.QueueBulkSetAndroidAppsAvailableForHosts(ctx, svc.ds, svc.logger, androidUUIDs, enterprise.Name()); err != nil {
+        if err := worker.QueueBulkSetAndroidAppsAvailableForHosts(ctx, svc.ds, svc.logger.SlogLogger(), androidUUIDs, enterprise.Name()); err != nil {
             return ctxerr.Wrap(ctx, err, "queue bulk set available android apps for hosts job")
         }
     }

@@ -1346,7 +1346,7 @@ func (svc *Service) AddHostsToTeamByFilter(ctx context.Context, teamID *uint, fi
     if _, err := worker.QueueMacosSetupAssistantJob(
         ctx,
         svc.ds,
-        svc.logger,
+        svc.logger.SlogLogger(),
         worker.MacosSetupAssistantHostsTransferred,
         teamID,
         serials...); err != nil {
@@ -7,6 +7,7 @@ import (
     "encoding/json"
     "fmt"
     "io"
+    "log/slog"
     "net/http"
     "net/url"
     "os"

@@ -32,7 +33,6 @@ import (
     "github.com/fleetdm/fleet/v4/server/service/contract"
     "github.com/fleetdm/fleet/v4/server/service/redis_key_value"
     "github.com/fleetdm/fleet/v4/server/worker"
-    kitlog "github.com/go-kit/log"
     redigo "github.com/gomodule/redigo/redis"
     "github.com/google/uuid"
     "github.com/jmoiron/sqlx"

@@ -1486,7 +1486,7 @@ func (s *integrationMDMTestSuite) TestDEPProfileAssignment() {
     checkNoJobsPending()

     // cooldown hosts are screened from update profile jobs that would assign profiles
-    _, err = worker.QueueMacosSetupAssistantJob(ctx, s.ds, kitlog.NewNopLogger(), worker.MacosSetupAssistantUpdateProfile, &dummyTeam.ID, eHost.HardwareSerial)
+    _, err = worker.QueueMacosSetupAssistantJob(ctx, s.ds, slog.New(slog.DiscardHandler), worker.MacosSetupAssistantUpdateProfile, &dummyTeam.ID, eHost.HardwareSerial)
     require.NoError(t, err)
     checkPendingMacOSSetupAssistantJob("update_profile", &dummyTeam.ID, []string{eHost.HardwareSerial}, 0)
     s.runIntegrationsSchedule()

@@ -1495,7 +1495,7 @@ func (s *integrationMDMTestSuite) TestDEPProfileAssignment() {
     checkNoJobsPending()

     // cooldown hosts are screened from delete profile jobs that would assign profiles
-    _, err = worker.QueueMacosSetupAssistantJob(ctx, s.ds, kitlog.NewNopLogger(), worker.MacosSetupAssistantProfileDeleted, &dummyTeam.ID, eHost.HardwareSerial)
+    _, err = worker.QueueMacosSetupAssistantJob(ctx, s.ds, slog.New(slog.DiscardHandler), worker.MacosSetupAssistantProfileDeleted, &dummyTeam.ID, eHost.HardwareSerial)
     require.NoError(t, err)
     checkPendingMacOSSetupAssistantJob("profile_deleted", &dummyTeam.ID, []string{eHost.HardwareSerial}, 0)
     s.runIntegrationsSchedule()
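In tests, `kitlog.NewNopLogger()` becomes `slog.New(slog.DiscardHandler)`. `slog.DiscardHandler` was added to the standard library in Go 1.24; on older toolchains the usual stand-in is a handler writing to `io.Discard`:

```go
package main

import (
	"io"
	"log/slog"
)

func main() {
	// Go 1.24+: a handler that drops every record.
	nop := slog.New(slog.DiscardHandler)
	nop.Info("never seen")

	// Pre-1.24 equivalent: write records to io.Discard.
	older := slog.New(slog.NewTextHandler(io.Discard, nil))
	older.Info("also never seen")
}
```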
@@ -238,26 +238,26 @@ func (s *integrationMDMTestSuite) SetupSuite() {

     macosJob := &worker.MacosSetupAssistant{
         Datastore:  s.ds,
-        Log:        wlog,
+        Log:        wlog.SlogLogger(),
         DEPService: apple_mdm.NewDEPService(s.ds, depStorage, wlog.SlogLogger()),
         DEPClient:  apple_mdm.NewDEPClient(depStorage, s.ds, wlog.SlogLogger()),
     }
     appleMDMJob := &worker.AppleMDM{
         Datastore: s.ds,
-        Log:       wlog,
+        Log:       wlog.SlogLogger(),
         Commander: mdmCommander,
     }
     vppVerifyJob := &worker.AppleSoftware{
         Datastore: s.ds,
-        Log:       wlog,
+        Log:       wlog.SlogLogger(),
         Commander: mdmCommander,
     }
     softwareWorker := &worker.SoftwareWorker{
         Datastore:     s.ds,
-        Log:           wlog,
+        Log:           wlog.SlogLogger(),
         AndroidModule: androidSvc,
     }
-    workr := worker.NewWorker(s.ds, wlog)
+    workr := worker.NewWorker(s.ds, wlog.SlogLogger())
     workr.TestIgnoreUnknownJobs = true
     workr.Register(macosJob, appleMDMJob, vppVerifyJob, softwareWorker)

@@ -391,7 +391,7 @@ func (s *integrationMDMTestSuite) SetupSuite() {
             defer s.onIntegrationsScheduleDone()
         }

-        return worker.ProcessDEPCooldowns(ctx, ds, logger)
+        return worker.ProcessDEPCooldowns(ctx, ds, logger.SlogLogger())
     }),
     )
     return integrationsSchedule, nil
@@ -641,7 +641,7 @@ func (svc *Service) processReleaseDeviceForOldFleetd(ctx context.Context, host *
     }

     // Enroll reference arg is not used in the release device task, passing empty string.
-    if err := worker.QueueAppleMDMJob(ctx, svc.ds, svc.logger, worker.AppleMDMPostDEPReleaseDeviceTask,
+    if err := worker.QueueAppleMDMJob(ctx, svc.ds, svc.logger.SlogLogger(), worker.AppleMDMPostDEPReleaseDeviceTask,
         host.UUID, host.Platform, host.TeamID, "", false, false, bootstrapCmdUUID, acctConfigCmdUUID); err != nil {
         return ctxerr.Wrap(ctx, err, "queue Apple Post-DEP release device job")
     }
@@ -6,6 +6,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "log/slog"
     "os"
     "strings"
     "time"

@@ -17,8 +18,6 @@ import (
     apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
     "github.com/fleetdm/fleet/v4/server/mdm/apple/appmanifest"
     "github.com/fleetdm/fleet/v4/server/ptr"
-    kitlog "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
     "github.com/google/uuid"
 )

@@ -43,7 +42,7 @@ const (
 // AppleMDM is the job processor for the apple_mdm job.
 type AppleMDM struct {
     Datastore             fleet.Datastore
-    Log                   kitlog.Logger
+    Log                   *slog.Logger
     Commander             *apple_mdm.MDMAppleCommander
     BootstrapPackageStore fleet.MDMBootstrapPackageStore
     VPPInstaller          fleet.AppleMDMVPPInstaller
@@ -168,7 +167,7 @@ func (a *AppleMDM) runPostDEPEnrollment(ctx context.Context, args appleMDMArgs)
     }

     if args.FromMDMMigration && !allowBootstrapDuringMigration {
-        level.Info(a.Log).Log("info", "skipping bootstrap package installation during MDM migration", "host_uuid", args.HostUUID)
+        a.Log.InfoContext(ctx, "skipping bootstrap package installation during MDM migration", "host_uuid", args.HostUUID)
         err = a.Datastore.RecordSkippedHostBootstrapPackage(ctx, args.HostUUID)
         if err != nil {
             return ctxerr.Wrap(ctx, err, "recording skipped bootstrap package")

@@ -191,7 +190,7 @@ func (a *AppleMDM) runPostDEPEnrollment(ctx context.Context, args appleMDMArgs)
     }

     if ref := args.EnrollReference; ref != "" {
-        a.Log.Log("info", "got an enroll_reference", "host_uuid", args.HostUUID, "ref", ref)
+        a.Log.InfoContext(ctx, "got an enroll_reference", "host_uuid", args.HostUUID, "ref", ref)
         if appCfg, err = a.getAppConfig(ctx, appCfg); err != nil {
             return err
         }

@@ -214,7 +213,7 @@ func (a *AppleMDM) runPostDEPEnrollment(ctx context.Context, args appleMDMArgs)
         if err != nil {
             return ctxerr.Wrap(ctx, err, "getting idp account display name")
         }
-        a.Log.Log("info", "setting username and fullname", "host_uuid", args.HostUUID)
+        a.Log.InfoContext(ctx, "setting username and fullname", "host_uuid", args.HostUUID)
         cmdUUID := uuid.New().String()
         if err := a.Commander.AccountConfiguration(
             ctx,

@@ -337,9 +336,9 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
         args.ReleaseDeviceStartedAt = &now
     }

-    level.Debug(a.Log).Log(
+    a.Log.DebugContext(ctx,
+        fmt.Sprintf("awaiting commands %v and profiles to settle for host %s", args.EnrollmentCommands, args.HostUUID),
         "task", "runPostDEPReleaseDevice",
-        "msg", fmt.Sprintf("awaiting commands %v and profiles to settle for host %s", args.EnrollmentCommands, args.HostUUID),
         "attempt", args.ReleaseDeviceAttempt,
         "started_at", args.ReleaseDeviceStartedAt.Format(time.RFC3339),
     )

@@ -351,7 +350,7 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
     // not appear to be reached.
     if (args.ReleaseDeviceAttempt >= minAttempts && time.Since(*args.ReleaseDeviceStartedAt) >= maxWaitTime) ||
         (args.ReleaseDeviceAttempt >= maxAttempts) {
-        a.Log.Log("info", "releasing device after too many attempts or too long wait", "host_uuid", args.HostUUID, "attempts", args.ReleaseDeviceAttempt)
+        a.Log.InfoContext(ctx, "releasing device after too many attempts or too long wait", "host_uuid", args.HostUUID, "attempts", args.ReleaseDeviceAttempt)
         if err := a.Commander.DeviceConfigured(ctx, args.HostUUID, uuid.NewString()); err != nil {
             return ctxerr.Wrapf(ctx, err, "failed to enqueue DeviceConfigured command after %d attempts", args.ReleaseDeviceAttempt)
         }

@@ -403,9 +402,9 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
         }
         return nil
     }
-    level.Debug(a.Log).Log(
+    a.Log.DebugContext(ctx,
+        fmt.Sprintf("command %s has completed", cmdUUID),
         "task", "runPostDEPReleaseDevice",
-        "msg", fmt.Sprintf("command %s has completed", cmdUUID),
     )
 }

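The recurring rewrite in this file maps go-kit's `level` wrappers onto slog's leveled, context-aware methods, with the former `"msg"` key promoted to the positional message argument. A side-by-side sketch (the go-kit lines are shown as comments; the values are placeholders):

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	log := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// level.Debug(log).Log("task", "runPostDEPReleaseDevice", "msg", "awaiting commands")
	log.DebugContext(ctx, "awaiting commands", "task", "runPostDEPReleaseDevice")

	// level.Info(log).Log("msg", "releasing device", "host_uuid", hostUUID)
	log.InfoContext(ctx, "releasing device", "host_uuid", "abc-123")

	// level.Error(log).Log("msg", "failed to sign bootstrap package URL", "err", err)
	log.ErrorContext(ctx, "failed to sign bootstrap package URL", "err", "example error")
}
```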
@@ -438,9 +437,9 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
         }
         return nil
     }
-    level.Debug(a.Log).Log(
+    a.Log.DebugContext(ctx,
+        fmt.Sprintf("profile %s has been deployed", prof.Identifier),
         "task", "runPostDEPReleaseDevice",
-        "msg", fmt.Sprintf("profile %s has been deployed", prof.Identifier),
     )
 }

@@ -454,7 +453,7 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
     }

     if len(profilesMissingInstallation) > 0 {
-        level.Info(a.Log).Log("msg", "re-enqueuing due to profiles missing installation", "host_uuid", args.HostUUID)
+        a.Log.InfoContext(ctx, "re-enqueuing due to profiles missing installation", "host_uuid", args.HostUUID)
         // requeue the task if some profiles are still missing.
         if err := reenqueueTask(); err != nil {
             return ctxerr.Wrap(ctx, err, "failed to re-enqueue task")

@@ -477,7 +476,7 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
     }

     if status.Status == fleet.SetupExperienceStatusPending || status.Status == fleet.SetupExperienceStatusRunning {
-        level.Info(a.Log).Log("msg", "re-enqueuing due to setup experience items still pending or running", "host_uuid", args.HostUUID, "status_id", status.ID)
+        a.Log.InfoContext(ctx, "re-enqueuing due to setup experience items still pending or running", "host_uuid", args.HostUUID, "status_id", status.ID)
         if err := reenqueueTask(); err != nil {
             return ctxerr.Wrap(ctx, err, "failed to re-enqueue task due to pending setup experience items")
         }

@@ -487,7 +486,7 @@ func (a *AppleMDM) runPostDEPReleaseDevice(ctx context.Context, args appleMDMArg
     }

     // release the device
-    a.Log.Log("info", "releasing device, all DEP enrollment commands and profiles have completed", "host_uuid", args.HostUUID)
+    a.Log.InfoContext(ctx, "releasing device, all DEP enrollment commands and profiles have completed", "host_uuid", args.HostUUID)
     if err := a.Commander.DeviceConfigured(ctx, args.HostUUID, uuid.NewString()); err != nil {
         return ctxerr.Wrap(ctx, err, "failed to enqueue DeviceConfigured command")
     }

@@ -500,7 +499,7 @@ func (a *AppleMDM) installFleetd(ctx context.Context, hostUUID string) (string,
     if err := a.Commander.InstallEnterpriseApplication(ctx, []string{hostUUID}, cmdUUID, manifestURL); err != nil {
         return "", err
     }
-    a.Log.Log("info", "sent command to install fleetd", "host_uuid", hostUUID)
+    a.Log.InfoContext(ctx, "sent command to install fleetd", "host_uuid", hostUUID)
     return cmdUUID, nil
 }

@@ -529,7 +528,7 @@ func (a *AppleMDM) installSetupExperienceVPPAppsOnIosIpadOS(ctx context.Context,
             return nil, ctxerr.Wrap(ctx, err, "updating setup experience status result to failure")
         }
         // If we enqueued a non-VPP item for an iOS/iPadOS device, it likely a code bug
-        level.Error(a.Log).Log("msg", "unexpected setup experience item for iOS/iPadOS device, only VPP apps are supported", "host_uuid", hostUUID, "status_id", status.ID)
+        a.Log.ErrorContext(ctx, "unexpected setup experience item for iOS/iPadOS device, only VPP apps are supported", "host_uuid", hostUUID, "status_id", status.ID)
     }
 }

@@ -571,7 +570,7 @@ func (a *AppleMDM) installSetupExperienceVPPAppsOnIosIpadOS(ctx context.Context,
         // if we get an error (e.g. no available licenses) while attempting to enqueue the
         // install, then we should immediately go to an error state so setup experience
         // isn't blocked.
-        level.Error(a.Log).Log("msg", "got an error when attempting to enqueue VPP app install", "err", err, "adam_id", app.VPPAppAdamID)
+        a.Log.ErrorContext(ctx, "got an error when attempting to enqueue VPP app install", "err", err, "adam_id", app.VPPAppAdamID)
         app.Status = fleet.SetupExperienceStatusFailure
         app.Error = ptr.String(err.Error())
     } else {

@@ -610,7 +609,7 @@ func (a *AppleMDM) installBootstrapPackage(ctx context.Context, hostUUID string,
     if err != nil {
         var nfe fleet.NotFoundError
         if errors.As(err, &nfe) {
-            a.Log.Log("info", "unable to find a bootstrap package for DEP enrolled device, skipping installation", "host_uuid", hostUUID)
+            a.Log.InfoContext(ctx, "unable to find a bootstrap package for DEP enrolled device, skipping installation", "host_uuid", hostUUID)
             return "", nil
         }

@@ -642,7 +641,7 @@ func (a *AppleMDM) installBootstrapPackage(ctx context.Context, hostUUID string,
     if err != nil {
         return "", err
     }
-    a.Log.Log("info", "sent command to install bootstrap package", "host_uuid", hostUUID)
+    a.Log.InfoContext(ctx, "sent command to install bootstrap package", "host_uuid", hostUUID)
     return cmdUUID, nil
 }

@@ -656,16 +655,16 @@ func (a *AppleMDM) getSignedURL(ctx context.Context, meta *fleet.MDMAppleBootstr
     // no CDN configured, fall back to the MDM URL
     case err != nil:
         // log the error but continue with the MDM URL
-        level.Error(a.Log).Log("msg", "failed to sign bootstrap package URL", "err", err)
+        a.Log.ErrorContext(ctx, "failed to sign bootstrap package URL", "err", err)
     default:
         exists, err := a.BootstrapPackageStore.Exists(ctx, pkgID)
         switch {
         case err != nil:
             // log the error but continue with the MDM URL
-            level.Error(a.Log).Log("msg", "failed to check if bootstrap package exists", "err", err)
+            a.Log.ErrorContext(ctx, "failed to check if bootstrap package exists", "err", err)
         case !exists:
             // log the error but continue with the MDM URL
-            level.Error(a.Log).Log("msg", "bootstrap package does not exist in package store", "pkg_id", pkgID)
+            a.Log.ErrorContext(ctx, "bootstrap package does not exist in package store", "pkg_id", pkgID)
         default:
             url = signedURL
         }
@@ -679,7 +678,7 @@ func QueueAppleMDMJob(
 func QueueAppleMDMJob(
     ctx context.Context,
     ds fleet.Datastore,
-    logger kitlog.Logger,
+    logger *slog.Logger,
     task AppleMDMTask,
     hostUUID string,
     platform string,

@@ -703,7 +702,7 @@ func QueueAppleMDMJob(
     if len(enrollmentCommandUUIDs) > 0 {
         attrs = append(attrs, "enrollment_commands", fmt.Sprintf("%v", enrollmentCommandUUIDs))
     }
-    level.Info(logger).Log(attrs...)
+    logger.InfoContext(ctx, "queuing Apple MDM job", attrs...)

     args := &appleMDMArgs{
         Task: task,

@@ -725,6 +724,6 @@ func QueueAppleMDMJob(
     if err != nil {
         return ctxerr.Wrap(ctx, err, "queueing job")
     }
-    level.Debug(logger).Log("job_id", job.ID)
+    logger.DebugContext(ctx, "queued Apple MDM job", "job_id", job.ID)
     return nil
 }
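`QueueAppleMDMJob` accumulates key/value pairs in a slice and passes them variadically; under slog the slice is `[]any` and the message that used to travel as a `"msg"` pair becomes the first argument. A minimal sketch of that call shape:

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

	// Accumulate alternating key/value pairs, then pass them variadically.
	attrs := []any{"task", "post_dep_enrollment", "host_uuid", "abc-123"}
	cmds := []string{"cmd-1", "cmd-2"}
	if len(cmds) > 0 {
		attrs = append(attrs, "enrollment_commands", cmds)
	}
	logger.InfoContext(ctx, "queuing Apple MDM job", attrs...)
}
```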
@@ -101,6 +101,7 @@ func TestAppleMDM(t *testing.T) {
     // nopLog := logging.NewNopLogger()
     // use this to debug/verify details of calls
     nopLog := logging.NewJSONLogger(os.Stdout)
+    slogLog := nopLog.SlogLogger()

     testOrgName := "fleet-test"

@@ -189,14 +190,14 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

     // create a host and enqueue the job
     h := createEnrolledHost(t, 1, nil, true, "darwin")
-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
     require.NoError(t, err)

     // run the worker, should mark the job as done

@@ -218,15 +219,15 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

     // create a host and enqueue the job
     h := createEnrolledHost(t, 1, nil, true, "darwin")
-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMTask("no-such-task"), h.UUID, "darwin", nil, "", false, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMTask("no-such-task"), h.UUID, "darwin", nil, "", false, false)
     require.NoError(t, err)

     // run the worker, should mark the job as failed

@@ -252,14 +253,14 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

     // use "" instead of "darwin" as platform to test a queued job after the upgrade to iOS/iPadOS support.
-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "", nil, "", false, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "", nil, "", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -288,13 +289,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -330,13 +331,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -379,13 +380,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -429,13 +430,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -475,13 +476,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, true)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, true)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -525,13 +526,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, true)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, true)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -576,13 +577,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, true)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", false, true)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -630,13 +631,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, true)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, "", false, true)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -669,13 +670,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "abcd", false, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "abcd", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -712,13 +713,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, idpAcc.UUID, false, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, idpAcc.UUID, false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -765,13 +766,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, idpAcc.UUID, false, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", &tm.ID, idpAcc.UUID, false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -799,13 +800,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostManualEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostManualEnrollmentTask, h.UUID, "darwin", nil, "", false, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -830,13 +831,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -866,13 +867,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -961,13 +962,13 @@ func TestAppleMDM(t *testing.T) {

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -1064,13 +1065,13 @@ INSERT INTO setup_experience_status_results (
     mdmWorker := &AppleMDM{
         VPPInstaller: vppInstaller,
         Datastore:    ds,
-        Log:          nopLog,
+        Log:          slogLog,
         Commander:    apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, h.Platform, nil, "", true, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, h.Platform, nil, "", true, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -1229,13 +1230,13 @@ INSERT INTO setup_experience_status_results (
     mdmWorker := &AppleMDM{
         VPPInstaller: vppInstaller,
         Datastore:    ds,
-        Log:          nopLog,
+        Log:          slogLog,
         Commander:    apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err = QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, h.Platform, nil, "", true, false)
+    err = QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, h.Platform, nil, "", true, false)
     require.NoError(t, err)

     // run the worker, should succeed

@@ -1359,13 +1360,13 @@ INSERT INTO setup_experience_status_results (

     mdmWorker := &AppleMDM{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
         Commander: apple_mdm.NewMDMAppleCommander(mdmStorage, mockPusher{}),
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(mdmWorker)

-    err := QueueAppleMDMJob(ctx, ds, nopLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
+    err := QueueAppleMDMJob(ctx, ds, slogLog, AppleMDMPostDEPEnrollmentTask, h.UUID, "darwin", nil, "", true, false)
     require.NoError(t, err)

     // run the worker, should succeed and enqueue the release job

@@ -1426,7 +1427,7 @@ func TestGetSignedURL(t *testing.T) {
     var data []byte
     buf := bytes.NewBuffer(data)
     logger := logging.NewLogfmtLogger(buf)
-    a := &AppleMDM{Log: logger}
+    a := &AppleMDM{Log: logger.SlogLogger()}

     // S3 not configured
     assert.Empty(t, a.getSignedURL(ctx, meta))
@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "log/slog"
     "net/http"
     "net/http/httptest"
     "testing"

@@ -12,7 +13,6 @@ import (
     "github.com/fleetdm/fleet/v4/server/fleet"
     "github.com/fleetdm/fleet/v4/server/mock"
     "github.com/fleetdm/fleet/v4/server/service/externalsvc"
-    kitlog "github.com/go-kit/log"
     "github.com/stretchr/testify/require"
 )

@@ -63,7 +63,7 @@ func TestJiraFailer(t *testing.T) {
     jira := &Jira{
         FleetURL:  "http://example.com",
         Datastore: ds,
-        Log:       kitlog.NewNopLogger(),
+        Log:       slog.New(slog.DiscardHandler),
         NewClientFunc: func(opts *externalsvc.JiraOptions) (JiraClient, error) {
             return failer, nil
         },

@@ -122,7 +122,7 @@ func TestZendeskFailer(t *testing.T) {
     zendesk := &Zendesk{
         FleetURL:  "http://example.com",
         Datastore: ds,
-        Log:       kitlog.NewNopLogger(),
+        Log:       slog.New(slog.DiscardHandler),
         NewClientFunc: func(opts *externalsvc.ZendeskOptions) (ZendeskClient, error) {
             return failer, nil
         },
@@ -3,15 +3,15 @@ package worker
 import (
     "context"
     "encoding/json"
+    "log/slog"

     "github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
     "github.com/fleetdm/fleet/v4/server/fleet"
-    kitlog "github.com/go-kit/log"
 )

 type BatchScripts struct {
     Datastore fleet.Datastore
-    Log       kitlog.Logger
+    Log       *slog.Logger
 }

 func (b *BatchScripts) Name() string {
@@ -5,11 +5,11 @@ import (
     "encoding/base64"
     "encoding/json"
     "fmt"
+    "log/slog"
     "time"

     "github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
     "github.com/fleetdm/fleet/v4/server/fleet"
-    kitlog "github.com/go-kit/log"
 )

 // Name of the DB migration job as registered in the worker. Note that although

@@ -29,7 +29,7 @@ const (
 // DBMigration is the job processor for the db_migration job.
 type DBMigration struct {
     Datastore fleet.Datastore
-    Log       kitlog.Logger
+    Log       *slog.Logger
 }

 // Name returns the name of the job.

@@ -81,7 +81,7 @@ func (m *DBMigration) migrateVPPToken(ctx context.Context) error {
         // it should've updated, as the location, org name and renew date were all
         // dummy values after the DB migration. Log something, but otherwise
         // continue as retrying won't change the result.
-        m.Log.Log("info", "VPP token metadata was not updated")
+        m.Log.InfoContext(ctx, "VPP token metadata was not updated")
     }

     if _, err := m.Datastore.UpdateVPPToken(ctx, tok.ID, tokenData); err != nil {
@@ -28,11 +28,12 @@ func TestDBMigrationsVPPToken(t *testing.T) {
     // nopLog := logging.NewJSONLogger(os.Stdout)

     // create and register the worker
+    slogLog := nopLog.SlogLogger()
     processor := &DBMigration{
         Datastore: ds,
-        Log:       nopLog,
+        Log:       slogLog,
     }
-    w := NewWorker(ds, nopLog)
+    w := NewWorker(ds, slogLog)
     w.Register(processor)

     // create the migrated token and enqueue the job
@@ -6,6 +6,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "log/slog"
     "sort"
     "sync"
     "text/template"

@@ -16,8 +17,6 @@ import (
     "github.com/fleetdm/fleet/v4/server/contexts/license"
     "github.com/fleetdm/fleet/v4/server/fleet"
     "github.com/fleetdm/fleet/v4/server/service/externalsvc"
-    kitlog "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
 )

 // jiraName is the name of the job as registered in the worker.

@@ -122,7 +121,7 @@ type JiraClient interface {
 type Jira struct {
     FleetURL  string
     Datastore fleet.Datastore
-    Log       kitlog.Logger
+    Log       *slog.Logger
     NewClientFunc func(*externalsvc.JiraOptions) (JiraClient, error)

     // mu protects concurrent access to clientsCache, so that the job processor

@@ -300,8 +299,7 @@ func (j *Jira) runVuln(ctx context.Context, cli JiraClient, args jiraArgs) error
     if err != nil {
         return err
     }
-    level.Debug(j.Log).Log(
-        "msg", "created jira issue for cve",
+    j.Log.DebugContext(ctx, "created jira issue for cve",
         "cve", vargs.CVE,
         "issue_id", createdIssue.ID,
         "issue_key", createdIssue.Key,

@@ -317,8 +315,7 @@ func (j *Jira) runFailingPolicy(ctx context.Context, cli JiraClient, args jiraAr
         return err
     }

-    attrs := []interface{}{
-        "msg", "created jira issue for failing policy",
+    attrs := []any{
         "policy_id", args.FailingPolicy.PolicyID,
         "policy_name", args.FailingPolicy.PolicyName,
         "issue_id", createdIssue.ID,

@@ -327,11 +324,11 @@ func (j *Jira) runFailingPolicy(ctx context.Context, cli JiraClient, args jiraAr
     if args.FailingPolicy.TeamID != nil {
         attrs = append(attrs, "team_id", *args.FailingPolicy.TeamID)
     }
-    level.Debug(j.Log).Log(attrs...)
+    j.Log.DebugContext(ctx, "created jira issue for failing policy", attrs...)
     return nil
 }

-func (j *Jira) createTemplatedIssue(ctx context.Context, cli JiraClient, summaryTpl, descTpl *template.Template, args interface{}) (*jira.Issue, error) {
+func (j *Jira) createTemplatedIssue(ctx context.Context, cli JiraClient, summaryTpl, descTpl *template.Template, args any) (*jira.Issue, error) {
     var buf bytes.Buffer
     if err := summaryTpl.Execute(&buf, args); err != nil {
         return nil, ctxerr.Wrap(ctx, err, "execute summary template")

@@ -366,11 +363,11 @@ func (j *Jira) createTemplatedIssue(ctx context.Context, cli JiraClient, summary
 func QueueJiraVulnJobs(
     ctx context.Context,
     ds fleet.Datastore,
-    logger kitlog.Logger,
+    logger *slog.Logger,
     recentVulns []fleet.SoftwareVulnerability,
     cveMeta map[string]fleet.CVEMeta,
 ) error {
-    level.Info(logger).Log("enabled", "true", "recentVulns", len(recentVulns))
+    logger.InfoContext(ctx, "jira integration enabled", "recent_vulns", len(recentVulns))

     // for troubleshooting, log in debug level the CVEs that we will process
     // (cannot be done in the loop below as we want to add the debug log

@@ -380,7 +377,7 @@ func QueueJiraVulnJobs(
         cves = append(cves, vuln.GetCVE())
     }
     sort.Strings(cves)
-    level.Debug(logger).Log("recent_cves", fmt.Sprintf("%v", cves))
+    logger.DebugContext(ctx, "recent CVEs to process", "recent_cves", fmt.Sprintf("%v", cves))

     cveGrouped := make(map[string][]uint)
     for _, v := range recentVulns {

@@ -399,18 +396,17 @@ func QueueJiraVulnJobs(
         if err != nil {
             return ctxerr.Wrap(ctx, err, "queueing job")
         }
-        level.Debug(logger).Log("job_id", job.ID)
+        logger.DebugContext(ctx, "queued jira vuln job", "job_id", job.ID)
     }
     return nil
 }

 // QueueJiraFailingPolicyJob queues a Jira job for a failing policy to process
 // asynchronously via the worker.
-func QueueJiraFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger,
+func QueueJiraFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logger *slog.Logger,
     policy *fleet.Policy, hosts []fleet.PolicySetHost,
 ) error {
-    attrs := []interface{}{
-        "enabled", "true",
+    attrs := []any{
         "failing_policy", policy.ID,
         "hosts_count", len(hosts),
     }

@@ -418,12 +414,11 @@ func QueueJiraFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logger *
         attrs = append(attrs, "team_id", *policy.TeamID)
     }
     if len(hosts) == 0 {
-        attrs = append(attrs, "msg", "skipping, no host")
-        level.Debug(logger).Log(attrs...)
+        logger.DebugContext(ctx, "skipping, no host", attrs...)
         return nil
     }

-    level.Info(logger).Log(attrs...)
+    logger.InfoContext(ctx, "queueing Jira failing policy job", attrs...)

     args := &failingPolicyArgs{
         PolicyID: policy.ID,

@@ -436,6 +431,6 @@ func QueueJiraFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logger *
     if err != nil {
         return ctxerr.Wrap(ctx, err, "queueing job")
     }
-    level.Debug(logger).Log("job_id", job.ID)
+    logger.DebugContext(ctx, "queued jira failing policy job", "job_id", job.ID)
     return nil
 }
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/http"
 	"net/http/httptest"
 	"testing"
@@ -16,7 +17,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mock"
 	"github.com/fleetdm/fleet/v4/server/ptr"
 	"github.com/fleetdm/fleet/v4/server/service/externalsvc"
-	kitlog "github.com/go-kit/log"
 	"github.com/stretchr/testify/require"
 )

@@ -203,7 +203,7 @@ func TestJiraRun(t *testing.T) {
 	jira := &Jira{
 		FleetURL:  "https://fleetdm.com",
 		Datastore: ds,
-		Log:       kitlog.NewNopLogger(),
+		Log:       slog.New(slog.DiscardHandler),
 		NewClientFunc: func(opts *externalsvc.JiraOptions) (JiraClient, error) {
 			return client, nil
 		},
@@ -221,7 +221,7 @@ func TestJiraRun(t *testing.T) {
 func TestJiraQueueVulnJobs(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := kitlog.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	t.Run("same vulnerability on multiple software only queue one job", func(t *testing.T) {
 		var count int
@@ -284,7 +284,7 @@ func TestJiraQueueVulnJobs(t *testing.T) {
 func TestJiraQueueFailingPolicyJob(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := kitlog.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	t.Run("success global", func(t *testing.T) {
 		ds.NewJobFunc = func(ctx context.Context, job *fleet.Job) (*fleet.Job, error) {
@@ -407,7 +407,7 @@ func TestJiraRunClientUpdate(t *testing.T) {
 	jiraJob := &Jira{
 		FleetURL:  "http://example.com",
 		Datastore: ds,
-		Log:       kitlog.NewNopLogger(),
+		Log:       slog.New(slog.DiscardHandler),
 		NewClientFunc: func(opts *externalsvc.JiraOptions) (JiraClient, error) {
 			// keep track of project keys received in calls to NewClientFunc
 			projectKeys = append(projectKeys, opts.ProjectKey)

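In the tests, `kitlog.NewNopLogger()` becomes `slog.New(slog.DiscardHandler)`. A small illustrative sketch with hypothetical helper names: `slog.DiscardHandler` was added in Go 1.24, and a handler writing to `io.Discard` is the usual substitute on earlier toolchains.

package worker

import (
	"io"
	"log/slog"
)

// newTestLogger is a hypothetical helper, shown only to illustrate the pattern.
// Go 1.24+: the stdlib ships a ready-made no-op handler.
func newTestLogger() *slog.Logger {
	return slog.New(slog.DiscardHandler)
}

// newTestLoggerCompat achieves the same on older toolchains by discarding output.
func newTestLoggerCompat() *slog.Logger {
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}
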
@@ -4,13 +4,12 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"log/slog"

 	"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
 	"github.com/fleetdm/fleet/v4/server/fleet"
 	apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
 	"github.com/fleetdm/fleet/v4/server/mdm/nanodep/godep"
-	kitlog "github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 )

 // Name of the macos setup assistant job as registered in the worker. Note that
@@ -34,7 +33,7 @@ const (
 // MacosSetupAssistant is the job processor for the macos_setup_assistant job.
 type MacosSetupAssistant struct {
 	Datastore  fleet.Datastore
-	Log        kitlog.Logger
+	Log        *slog.Logger
 	DEPService *apple_mdm.DEPService
 	DEPClient  *godep.Client
 }
@@ -110,10 +109,10 @@ func (m *MacosSetupAssistant) runProfileChanged(ctx context.Context, args macosS
 	if len(skipSerials) > 0 {
 		// NOTE: the `dep_cooldown` job of the `integrations` cron picks up the assignments
 		// after the cooldown period is over
-		level.Info(m.Log).Log("msg", "run profile changed: skipping assign profile for devices on cooldown", "serials", fmt.Sprintf("%s", skipSerials))
+		m.Log.InfoContext(ctx, "run profile changed: skipping assign profile for devices on cooldown", "serials", fmt.Sprintf("%s", skipSerials))
 	}
 	if len(assignSerials) == 0 {
-		level.Info(m.Log).Log("msg", "run profile changed: no devices to assign profile")
+		m.Log.InfoContext(ctx, "run profile changed: no devices to assign profile")
 		return nil
 	}
@@ -183,10 +182,10 @@ func (m *MacosSetupAssistant) runProfileDeleted(ctx context.Context, args macosS
 	if len(skipSerials) > 0 {
 		// NOTE: the `dep_cooldown` job of the `integrations` cron picks up the assignments
 		// after the cooldown period is over
-		level.Info(m.Log).Log("msg", "run profile deleted: skipping assign profile for devices on cooldown", "serials", fmt.Sprintf("%s", skipSerials))
+		m.Log.InfoContext(ctx, "run profile deleted: skipping assign profile for devices on cooldown", "serials", fmt.Sprintf("%s", skipSerials))
 	}
 	if len(assignSerials) == 0 {
-		level.Info(m.Log).Log("msg", "run profile deleted: no devices to assign profile")
+		m.Log.InfoContext(ctx, "run profile deleted: no devices to assign profile")
 		return nil
 	}
@@ -242,11 +241,11 @@ func (m *MacosSetupAssistant) runHostsTransferred(ctx context.Context, args maco
 	} else if len(cooldownSerials) > 0 {
 		// NOTE: the `dep_cooldown` job of the `integrations` cron picks up the assignments
 		// after the cooldown period is over
-		level.Info(m.Log).Log("msg", "run hosts transferred: skipping assign profile for devices on cooldown", "serials", fmt.Sprintf("%s", cooldownSerials))
+		m.Log.InfoContext(ctx, "run hosts transferred: skipping assign profile for devices on cooldown", "serials", fmt.Sprintf("%s", cooldownSerials))
 	}

 	if len(assignSerials) == 0 {
-		level.Info(m.Log).Log("msg", "run hosts transferred: no devices to assign profile")
+		m.Log.InfoContext(ctx, "run hosts transferred: no devices to assign profile")
 		return nil
 	}
@@ -362,12 +361,12 @@ func (m *MacosSetupAssistant) getTeamNoTeam(ctx context.Context, tmID *uint) (*f
 func QueueMacosSetupAssistantJob(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger kitlog.Logger,
+	logger *slog.Logger,
 	task MacosSetupAssistantTask,
 	teamID *uint,
 	serialNumbers ...string,
 ) (uint, error) {
-	attrs := []interface{}{
+	attrs := []any{
 		"enabled", "true",
 		macosSetupAssistantJobName, task,
 		"hosts_count", len(serialNumbers),
@@ -375,7 +374,7 @@ func QueueMacosSetupAssistantJob(
 	if teamID != nil {
 		attrs = append(attrs, "team_id", *teamID)
 	}
-	level.Info(logger).Log(attrs...)
+	logger.InfoContext(ctx, "queueing macOS setup assistant job", attrs...)

 	args := &macosSetupAssistantArgs{
 		Task: task,
@@ -386,27 +385,27 @@ func QueueMacosSetupAssistantJob(
 	if err != nil {
 		return 0, ctxerr.Wrap(ctx, err, "queueing job")
 	}
-	level.Debug(logger).Log("job_id", job.ID)
+	logger.DebugContext(ctx, "queued macOS setup assistant job", "job_id", job.ID)
 	return job.ID, nil
 }

-func ProcessDEPCooldowns(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger) error {
+func ProcessDEPCooldowns(ctx context.Context, ds fleet.Datastore, logger *slog.Logger) error {
 	serialsByTeamID, err := ds.GetDEPAssignProfileExpiredCooldowns(ctx)
 	if err != nil {
 		return ctxerr.Wrap(ctx, err, "getting cooldowns")
 	}
 	if len(serialsByTeamID) == 0 {
-		level.Info(logger).Log("msg", "no cooldowns to process")
+		logger.InfoContext(ctx, "no cooldowns to process")
 		return nil
 	}

 	// queue job for each team so that macOS setup assistant worker can pick it up and process it
 	for teamID, serials := range serialsByTeamID {
 		if len(serials) == 0 {
-			logger.Log("msg", "no cooldowns", "team_id", teamID)
+			logger.InfoContext(ctx, "no cooldowns", "team_id", teamID)
 			continue
 		}
-		level.Info(logger).Log("msg", "processing cooldowns", "team_id", teamID, "serials", serials)
+		logger.InfoContext(ctx, "processing cooldowns", "team_id", teamID, "serials", serials)

 		var tid *uint
 		if teamID != 0 {

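The `attrs := []any{...}` pattern above survives the migration because slog's variadic calls accept alternating key/value pairs, so optional attributes can still be accumulated before a single log call. A minimal sketch with hypothetical inputs:

package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// Hypothetical inputs, for illustration only.
	var teamID *uint
	serials := []string{"serial-1", "serial-2"}

	// Accumulate alternating key/value pairs, appending optional ones,
	// then splat the slice into one structured log call.
	attrs := []any{"hosts_count", len(serials)}
	if teamID != nil {
		attrs = append(attrs, "team_id", *teamID)
	}
	logger.InfoContext(ctx, "queueing macOS setup assistant job", attrs...)
}
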
@@ -78,7 +78,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	require.NoError(t, err)
 	macosJob := &MacosSetupAssistant{
 		Datastore:  ds,
-		Log:        logger,
+		Log:        logger.SlogLogger(),
 		DEPService: apple_mdm.NewDEPService(ds, depStorage, logger.SlogLogger()),
 		DEPClient:  apple_mdm.NewDEPClient(depStorage, ds, logger.SlogLogger()),
 	}
@@ -144,7 +144,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	err = depStorage.StoreConfig(ctx, org2Name, &nanodep_client.Config{BaseURL: srv.URL})
 	require.NoError(t, err)

-	w := NewWorker(ds, logger)
+	w := NewWorker(ds, logger.SlogLogger())
 	w.Register(macosJob)

 	runCheckDone := func() {
@@ -166,7 +166,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	start := time.Now().Truncate(time.Second)

 	// enqueue a regenerate all and process the jobs
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantUpdateAllProfiles, nil)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantUpdateAllProfiles, nil)
 	require.NoError(t, err)
 	runCheckDone()

@@ -209,7 +209,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	})
 	require.NoError(t, err)
 	require.NotZero(t, tm1Asst.ID)
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantProfileChanged, &tm1.ID)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantProfileChanged, &tm1.ID)
 	require.NoError(t, err)
 	runCheckDone()
@@ -248,7 +248,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	tm2, err = ds.SaveTeam(ctx, tm2)
 	require.NoError(t, err)

-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantUpdateProfile, &tm2.ID)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantUpdateProfile, &tm2.ID)
 	require.NoError(t, err)
 	runCheckDone()
@@ -278,11 +278,11 @@ func TestMacosSetupAssistant(t *testing.T) {
 	require.NotZero(t, tm3Asst.ID)
 	err = ds.DeleteMDMAppleSetupAssistant(ctx, &tm1.ID)
 	require.NoError(t, err)
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantProfileChanged, &tm2.ID)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantProfileChanged, &tm2.ID)
 	require.NoError(t, err)
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantProfileChanged, &tm3.ID)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantProfileChanged, &tm3.ID)
 	require.NoError(t, err)
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantProfileDeleted, &tm1.ID)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantProfileDeleted, &tm1.ID)
 	require.NoError(t, err)
 	runCheckDone()
@@ -300,7 +300,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	tm2, err = ds.SaveTeam(ctx, tm2)
 	require.NoError(t, err)

-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantUpdateProfile, &tm2.ID)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantUpdateProfile, &tm2.ID)
 	require.NoError(t, err)
 	runCheckDone()
@@ -319,9 +319,9 @@ func TestMacosSetupAssistant(t *testing.T) {
 	err = ds.DeleteTeam(ctx, tm2.ID)
 	require.NoError(t, err)

-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantHostsTransferred, &tm3.ID, "serial-2", "serial-4")
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantHostsTransferred, &tm3.ID, "serial-2", "serial-4")
 	require.NoError(t, err)
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantTeamDeleted, nil, "serial-5") // hosts[5] was in team 2
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantTeamDeleted, nil, "serial-5") // hosts[5] was in team 2
 	require.NoError(t, err)
 	runCheckDone()
@@ -343,7 +343,7 @@ func TestMacosSetupAssistant(t *testing.T) {
 	require.NoError(t, err)
 	require.NotZero(t, noTmAsst.ID)

-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantProfileChanged, nil)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantProfileChanged, nil)
 	require.NoError(t, err)
 	runCheckDone()
@@ -358,7 +358,7 @@ func TestMacosSetupAssistant(t *testing.T) {

 	// check that profiles get re-generated (note that timestamps are not
 	// impacted as the content of the profiles did not change)
-	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger, MacosSetupAssistantUpdateAllProfiles, nil)
+	_, err = QueueMacosSetupAssistantJob(ctx, ds, logger.SlogLogger(), MacosSetupAssistantUpdateAllProfiles, nil)
 	require.NoError(t, err)
 	runCheckDone()

@@ -10,8 +10,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/fleet"
 	"github.com/fleetdm/fleet/v4/server/mdm/android"
 	"github.com/fleetdm/fleet/v4/server/ptr"
-	kitlog "github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/google/uuid"
 	"google.golang.org/api/androidmanagement/v1"
 	"google.golang.org/api/googleapi"
@@ -24,7 +22,7 @@ type SoftwareWorkerTask string
 type SoftwareWorker struct {
 	Datastore     fleet.Datastore
 	AndroidModule android.Service
-	Log           kitlog.Logger
+	Log           *slog.Logger
 }

 func (v *SoftwareWorker) Name() string {
@@ -387,7 +385,7 @@ func (v *SoftwareWorker) bulkMakeAndroidAppsAvailableForHost(ctx context.Context
 	// Include the Fleet Agent in the app list so it's not removed when we replace the apps.
 	fleetAgentPolicy, err := v.AndroidModule.BuildFleetAgentApplicationPolicy(ctx, hostUUID)
 	if err != nil {
-		level.Error(v.Log).Log("msg", "failed to build Fleet Agent policy, Fleet Agent may be removed", "host_uuid", hostUUID, "err", err)
+		v.Log.ErrorContext(ctx, "failed to build Fleet Agent policy, Fleet Agent may be removed", "host_uuid", hostUUID, "err", err)
 	} else if fleetAgentPolicy != nil {
 		appPolicies = append(appPolicies, fleetAgentPolicy)
 	}
@@ -454,7 +452,7 @@ func QueueRunAndroidSetupExperience(ctx context.Context, ds fleet.Datastore, log
 	return nil
 }

-func QueueMakeAndroidAppAvailableJob(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger, applicationID string, appTeamID uint, enterpriseName string, appConfigChanged bool) error {
+func QueueMakeAndroidAppAvailableJob(ctx context.Context, ds fleet.Datastore, logger *slog.Logger, applicationID string, appTeamID uint, enterpriseName string, appConfigChanged bool) error {
 	args := &softwareWorkerArgs{
 		Task:          makeAndroidAppAvailableTask,
 		ApplicationID: applicationID,
@@ -468,11 +466,11 @@ func QueueMakeAndroidAppAvailableJob(ctx context.Context, ds fleet.Datastore, lo
 		return ctxerr.Wrap(ctx, err, "queueing job")
 	}

-	level.Debug(logger).Log("job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
+	logger.DebugContext(ctx, "queued software worker job", "job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
 	return nil
 }

-func QueueMakeAndroidAppUnavailableJob(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger, applicationID string, hostsUUIDToPolicyID map[string]string, enterpriseName string) error {
+func QueueMakeAndroidAppUnavailableJob(ctx context.Context, ds fleet.Datastore, logger *slog.Logger, applicationID string, hostsUUIDToPolicyID map[string]string, enterpriseName string) error {
 	args := &softwareWorkerArgs{
 		Task:          makeAndroidAppUnavailableTask,
 		ApplicationID: applicationID,
@@ -485,14 +483,14 @@ func QueueMakeAndroidAppUnavailableJob(ctx context.Context, ds fleet.Datastore,
 		return ctxerr.Wrap(ctx, err, "queueing job")
 	}

-	level.Debug(logger).Log("job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
+	logger.DebugContext(ctx, "queued software worker job", "job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
 	return nil
 }

 func QueueBulkSetAndroidAppsAvailableForHost(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger kitlog.Logger,
+	logger *slog.Logger,
 	hostUUID string,
 	policyID string,
 	applicationIDs []string,
@@ -512,7 +510,7 @@ func QueueBulkSetAndroidAppsAvailableForHost(
 		return ctxerr.Wrap(ctx, err, "queueing job")
 	}

-	level.Debug(logger).Log("job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
+	logger.DebugContext(ctx, "queued software worker job", "job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
 	return nil
 }
@@ -556,7 +554,7 @@ func (v *SoftwareWorker) bulkSetAndroidAppsAvailableForHosts(ctx context.Context
 	// Include the Fleet Agent in the app list so it's not removed when we replace the apps.
 	fleetAgentPolicy, err := v.AndroidModule.BuildFleetAgentApplicationPolicy(ctx, uuid)
 	if err != nil {
-		level.Error(v.Log).Log("msg", "failed to build Fleet Agent policy, Fleet Agent may be removed", "host_uuid", uuid, "err", err)
+		v.Log.ErrorContext(ctx, "failed to build Fleet Agent policy, Fleet Agent may be removed", "host_uuid", uuid, "err", err)
 	} else if fleetAgentPolicy != nil {
 		appPolicies = append(appPolicies, fleetAgentPolicy)
 	}
@@ -576,7 +574,7 @@ func (v *SoftwareWorker) bulkSetAndroidAppsAvailableForHosts(ctx context.Context
 func QueueBulkSetAndroidAppsAvailableForHosts(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger kitlog.Logger,
+	logger *slog.Logger,
 	uuidsToIDs map[string]uint,
 	enterpriseName string) error {
@@ -591,6 +589,6 @@ func QueueBulkSetAndroidAppsAvailableForHosts(
 		return ctxerr.Wrap(ctx, err, "queueing job")
 	}

-	level.Debug(logger).Log("job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
+	logger.DebugContext(ctx, "queued software worker job", "job_id", job.ID, "job_name", softwareWorkerJobName, "task", args.Task)
 	return nil
 }

@@ -3,6 +3,7 @@ package worker
 import (
 	"context"
 	"encoding/json"
+	"log/slog"
 	"testing"

 	"github.com/fleetdm/fleet/v4/server/datastore/mysql"
@@ -10,7 +11,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mdm/android"
 	"github.com/fleetdm/fleet/v4/server/mock"
 	"github.com/fleetdm/fleet/v4/server/ptr"
-	kitlog "github.com/go-kit/log"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/api/androidmanagement/v1"
 )
@@ -95,7 +95,7 @@ func TestBulkSetAndroidAppsAvailableForHostsPreservesFleetAgent(t *testing.T) {
 	worker := &SoftwareWorker{
 		Datastore:     ds,
 		AndroidModule: androidModule,
-		Log:           kitlog.NewNopLogger(),
+		Log:           slog.New(slog.DiscardHandler),
 	}

 	err := worker.bulkSetAndroidAppsAvailableForHosts(ctx, map[string]uint{hostUUID: hostID}, "enterprises/test")
@@ -149,7 +149,7 @@ func TestBulkMakeAndroidAppsAvailableForHostPreservesFleetAgent(t *testing.T) {
 	worker := &SoftwareWorker{
 		Datastore:     ds,
 		AndroidModule: androidModule,
-		Log:           kitlog.NewNopLogger(),
+		Log:           slog.New(slog.DiscardHandler),
 	}

 	// Simulate adding a VPP app via BatchAssociateVPPApps

@@ -3,13 +3,12 @@ package worker
 import (
 	"context"
 	"encoding/json"
+	"log/slog"
 	"time"

 	"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
 	"github.com/fleetdm/fleet/v4/server/fleet"
 	apple_mdm "github.com/fleetdm/fleet/v4/server/mdm/apple"
-	kitlog "github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 )

 const AppleSoftwareJobName = "apple_software"
@@ -21,7 +20,7 @@ const verifyVPPTask AppleSoftwareTask = "verify_vpp_installs"
 type AppleSoftware struct {
 	Datastore fleet.Datastore
 	Commander *apple_mdm.MDMAppleCommander
-	Log       kitlog.Logger
+	Log       *slog.Logger
 }

 func (v *AppleSoftware) Name() string {
@@ -52,7 +51,7 @@ func (v *AppleSoftware) Run(ctx context.Context, argsJSON json.RawMessage) error
 }

 func (v *AppleSoftware) verifyVPPInstalls(ctx context.Context, hostUUID, verificationCommandUUID string, disableManagedOnlyApps bool) error {
-	level.Debug(v.Log).Log("msg", "verifying VPP installs", "host_uuid", hostUUID, "verification_command_uuid", verificationCommandUUID)
+	v.Log.DebugContext(ctx, "verifying VPP installs", "host_uuid", hostUUID, "verification_command_uuid", verificationCommandUUID)
 	newListCmdUUID := fleet.VerifySoftwareInstallCommandUUID()
 	// for app verification, we always request only managed apps except
 	// if disableManagedOnlyApps is true
@@ -69,12 +68,12 @@ func (v *AppleSoftware) verifyVPPInstalls(ctx context.Context, hostUUID, verific
 		return ctxerr.Wrap(ctx, err, "update in-house app install record")
 	}

-	level.Debug(v.Log).Log("msg", "new installed application list command sent", "uuid", newListCmdUUID)
+	v.Log.DebugContext(ctx, "new installed application list command sent", "uuid", newListCmdUUID)

 	return nil
 }

-func QueueVPPInstallVerificationJob(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger, requestDelay time.Duration, hostUUID, verificationCommandUUID string, disableManagedOnly bool) error {
+func QueueVPPInstallVerificationJob(ctx context.Context, ds fleet.Datastore, logger *slog.Logger, requestDelay time.Duration, hostUUID, verificationCommandUUID string, disableManagedOnly bool) error {
 	args := &appleSoftwareArgs{
 		Task:     verifyVPPTask,
 		HostUUID: hostUUID,
@@ -87,6 +86,6 @@ func QueueVPPInstallVerificationJob(ctx context.Context, ds fleet.Datastore, log
 		return ctxerr.Wrap(ctx, err, "queueing job")
 	}

-	level.Debug(logger).Log("job_id", job.ID, "job_name", appleMDMJobName, "task", args.Task)
+	logger.DebugContext(ctx, "queued VPP install verification job", "job_id", job.ID, "job_name", AppleSoftwareJobName, "task", args.Task)
 	return nil
 }

@@ -4,12 +4,11 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"time"

 	"github.com/fleetdm/fleet/v4/server/contexts/ctxerr"
 	"github.com/fleetdm/fleet/v4/server/fleet"
-	"github.com/fleetdm/fleet/v4/server/platform/logging"
-	"github.com/go-kit/log/level"
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
@@ -62,7 +61,7 @@ type vulnArgs struct {
 // Worker runs jobs. NOT SAFE FOR CONCURRENT USE.
 type Worker struct {
 	ds  fleet.Datastore
-	log *logging.Logger
+	log *slog.Logger

 	// For tests only, allows ignoring unknown jobs instead of failing them.
 	TestIgnoreUnknownJobs bool
@@ -74,7 +73,7 @@ type Worker struct {
 	registry map[string]Job
 }

-func NewWorker(ds fleet.Datastore, log *logging.Logger) *Worker {
+func NewWorker(ds fleet.Datastore, log *slog.Logger) *Worker {
 	return &Worker{
 		ds:  ds,
 		log: log,
@@ -149,7 +148,7 @@ func (w *Worker) ProcessJobs(ctx context.Context) error {

 	jobNames := w.jobNames()
 	if len(jobNames) == 0 {
-		level.Info(w.log).Log("msg", "no jobs registered, nothing to process")
+		w.log.InfoContext(ctx, "no jobs registered, nothing to process")
 		return nil
 	}
@@ -175,18 +174,18 @@ func (w *Worker) ProcessJobs(ctx context.Context) error {
 		log := w.log.With("job_id", job.ID)

 		if _, ok := seen[job.ID]; ok {
-			level.Debug(log).Log("msg", "some jobs failed, retrying on next cron execution")
+			log.DebugContext(ctx, "some jobs failed, retrying on next cron execution")
 			return nil
 		}
 		seen[job.ID] = struct{}{}

-		level.Debug(log).Log("msg", "processing job")
+		log.DebugContext(ctx, "processing job")

 		if err := w.processJob(ctx, job); err != nil {
-			level.Error(log).Log("msg", "process job", "err", err)
+			log.ErrorContext(ctx, "process job", "err", err)
 			job.Error = err.Error()
 			if job.Retries < maxRetries {
-				level.Debug(log).Log("msg", "will retry job")
+				log.DebugContext(ctx, "will retry job")
 				job.Retries += 1
 				delays := w.delayPerRetry
 				if delays == nil {
@@ -207,7 +206,7 @@ func (w *Worker) ProcessJobs(ctx context.Context) error {
 			// of queue. GetQueuedJobs fetches jobs by updated_at, so it will not return the same job until the queue
 			// has been processed once.
 			if _, err := w.ds.UpdateJob(ctx, job.ID, job); err != nil {
-				level.Error(log).Log("update job", "err", err)
+				log.ErrorContext(ctx, "update job", "err", err)
 			}
 		}
 	}

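The worker loop derives a per-job logger once with `With` and reuses it, so every record for that job carries `job_id` without repeating the attribute at each call site. A standalone sketch of the same shape (standard library only; the values are made up for illustration):

package main

import (
	"context"
	"errors"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	base := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))

	// Bind job_id once; every record below inherits it.
	log := base.With("job_id", 123)
	log.DebugContext(ctx, "processing job")

	if err := errors.New("simulated failure"); err != nil {
		// Errors travel as a plain "err" attribute, matching the diff above.
		log.ErrorContext(ctx, "process job", "err", err)
		log.DebugContext(ctx, "will retry job")
	}
}
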
@@ -57,7 +57,7 @@ func TestWorker(t *testing.T) {
 	}

 	logger := logging.NewNopLogger()
-	w := NewWorker(ds, logger)
+	w := NewWorker(ds, logger.SlogLogger())

 	// register a test job
 	jobCalled := false
@@ -113,7 +113,7 @@ func TestWorkerRetries(t *testing.T) {
 	}

 	logger := logging.NewNopLogger()
-	w := NewWorker(ds, logger)
+	w := NewWorker(ds, logger.SlogLogger())

 	// register a test job
 	jobCalled := 0
@@ -189,7 +189,7 @@ func TestWorkerMiddleJobFails(t *testing.T) {
 	}

 	logger := logging.NewNopLogger()
-	w := NewWorker(ds, logger)
+	w := NewWorker(ds, logger.SlogLogger())

 	// register a test job
 	var jobCallCount int
@@ -246,7 +246,7 @@ func TestWorkerWithRealDatastore(t *testing.T) {
 	mysql.TruncateTables(t, ds)

 	logger := logging.NewNopLogger()
-	w := NewWorker(ds, logger)
+	w := NewWorker(ds, logger.SlogLogger())
 	w.delayPerRetry = []time.Duration{
 		1: 0,
 		2: 0,

@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"log/slog"
 	"sort"
 	"sync"
 	"text/template"
@@ -15,8 +16,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/contexts/license"
 	"github.com/fleetdm/fleet/v4/server/fleet"
 	"github.com/fleetdm/fleet/v4/server/service/externalsvc"
-	kitlog "github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	zendesk "github.com/nukosuke/go-zendesk/zendesk"
 )
@@ -123,7 +122,7 @@ type ZendeskClient interface {
 type Zendesk struct {
 	FleetURL  string
 	Datastore fleet.Datastore
-	Log       kitlog.Logger
+	Log       *slog.Logger
 	NewClientFunc func(*externalsvc.ZendeskOptions) (ZendeskClient, error)

 	// mu protects concurrent access to clientsCache, so that the job processor
@@ -303,8 +302,7 @@ func (z *Zendesk) runVuln(ctx context.Context, cli ZendeskClient, args zendeskAr
 	if err != nil {
 		return err
 	}
-	level.Debug(z.Log).Log(
-		"msg", "created zendesk ticket for cve",
+	z.Log.DebugContext(ctx, "created zendesk ticket for cve",
 		"cve", vargs.CVE,
 		"ticket_id", createdTicket.ID,
 	)
@@ -319,8 +317,7 @@ func (z *Zendesk) runFailingPolicy(ctx context.Context, cli ZendeskClient, args
 		return err
 	}

-	attrs := []interface{}{
-		"msg", "created zendesk ticket for failing policy",
+	attrs := []any{
 		"policy_id", args.FailingPolicy.PolicyID,
 		"policy_name", args.FailingPolicy.PolicyName,
 		"ticket_id", createdTicket.ID,
@@ -328,11 +325,11 @@ func (z *Zendesk) runFailingPolicy(ctx context.Context, cli ZendeskClient, args
 	if args.FailingPolicy.TeamID != nil {
 		attrs = append(attrs, "team_id", *args.FailingPolicy.TeamID)
 	}
-	level.Debug(z.Log).Log(attrs...)
+	z.Log.DebugContext(ctx, "created zendesk ticket for failing policy", attrs...)
 	return nil
 }

-func (z *Zendesk) createTemplatedTicket(ctx context.Context, cli ZendeskClient, summaryTpl, descTpl *template.Template, args interface{}) (*zendesk.Ticket, error) {
+func (z *Zendesk) createTemplatedTicket(ctx context.Context, cli ZendeskClient, summaryTpl, descTpl *template.Template, args any) (*zendesk.Ticket, error) {
 	var buf bytes.Buffer
 	if err := summaryTpl.Execute(&buf, args); err != nil {
 		return nil, ctxerr.Wrap(ctx, err, "execute summary template")
@@ -362,11 +359,11 @@ func (z *Zendesk) createTemplatedTicket(ctx context.Context, cli ZendeskClient,
 func QueueZendeskVulnJobs(
 	ctx context.Context,
 	ds fleet.Datastore,
-	logger kitlog.Logger,
+	logger *slog.Logger,
 	recentVulns []fleet.SoftwareVulnerability,
 	cveMeta map[string]fleet.CVEMeta,
 ) error {
-	level.Info(logger).Log("enabled", "true", "recentVulns", len(recentVulns))
+	logger.InfoContext(ctx, "zendesk integration enabled", "recent_vulns", len(recentVulns))

 	// for troubleshooting, log in debug level the CVEs that we will process
 	// (cannot be done in the loop below as we want to add the debug log
@@ -376,7 +373,7 @@ func QueueZendeskVulnJobs(
 		cves = append(cves, vuln.GetCVE())
 	}
 	sort.Strings(cves)
-	level.Debug(logger).Log("recent_cves", fmt.Sprintf("%v", cves))
+	logger.DebugContext(ctx, "recent CVEs to process", "recent_cves", fmt.Sprintf("%v", cves))

 	cveGrouped := make(map[string][]uint)
 	for _, v := range recentVulns {
@@ -395,14 +392,14 @@ func QueueZendeskVulnJobs(
 		if err != nil {
 			return ctxerr.Wrap(ctx, err, "queueing job")
 		}
-		level.Debug(logger).Log("job_id", job.ID)
+		logger.DebugContext(ctx, "queued zendesk vuln job", "job_id", job.ID)
 	}
 	return nil
 }

 // QueueZendeskFailingPolicyJob queues a Zendesk job for a failing policy to
 // process asynchronously via the worker.
-func QueueZendeskFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logger kitlog.Logger,
+func QueueZendeskFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logger *slog.Logger,
 	policy *fleet.Policy, hosts []fleet.PolicySetHost,
 ) error {
 	attrs := []interface{}{
@@ -414,12 +411,11 @@ func QueueZendeskFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logge
 		attrs = append(attrs, "team_id", *policy.TeamID)
 	}
 	if len(hosts) == 0 {
-		attrs = append(attrs, "msg", "skipping, no host")
-		level.Debug(logger).Log(attrs...)
+		logger.DebugContext(ctx, "skipping, no host", attrs...)
 		return nil
 	}

-	level.Info(logger).Log(attrs...)
+	logger.InfoContext(ctx, "queueing Zendesk failing policy job", attrs...)

 	args := &failingPolicyArgs{
 		PolicyID: policy.ID,
@@ -432,6 +428,6 @@ func QueueZendeskFailingPolicyJob(ctx context.Context, ds fleet.Datastore, logge
 	if err != nil {
 		return ctxerr.Wrap(ctx, err, "queueing job")
 	}
-	level.Debug(logger).Log("job_id", job.ID)
+	logger.DebugContext(ctx, "queued zendesk failing policy job", "job_id", job.ID)
 	return nil
 }

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"io"
+	"log/slog"
 	"net/http"
 	"net/http/httptest"
 	"testing"
@@ -14,7 +15,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mock"
 	"github.com/fleetdm/fleet/v4/server/ptr"
 	"github.com/fleetdm/fleet/v4/server/service/externalsvc"
-	kitlog "github.com/go-kit/log"
 	zendesk "github.com/nukosuke/go-zendesk/zendesk"
 	"github.com/stretchr/testify/require"
 )
@@ -182,7 +182,7 @@ func TestZendeskRun(t *testing.T) {
 	zendesk := &Zendesk{
 		FleetURL:  "https://fleetdm.com",
 		Datastore: ds,
-		Log:       kitlog.NewNopLogger(),
+		Log:       slog.New(slog.DiscardHandler),
 		NewClientFunc: func(opts *externalsvc.ZendeskOptions) (ZendeskClient, error) {
 			return client, nil
 		},
@@ -200,7 +200,7 @@ func TestZendeskRun(t *testing.T) {
 func TestZendeskQueueVulnJobs(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := kitlog.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	t.Run("same vulnerability on multiple software only queue one job", func(t *testing.T) {
 		var count int
@@ -263,7 +263,7 @@ func TestZendeskQueueVulnJobs(t *testing.T) {
 func TestZendeskQueueFailingPolicyJob(t *testing.T) {
 	ds := new(mock.Store)
 	ctx := context.Background()
-	logger := kitlog.NewNopLogger()
+	logger := slog.New(slog.DiscardHandler)

 	t.Run("success global", func(t *testing.T) {
 		ds.NewJobFunc = func(ctx context.Context, job *fleet.Job) (*fleet.Job, error) {
@@ -386,7 +386,7 @@ func TestZendeskRunClientUpdate(t *testing.T) {
 	zendeskJob := &Zendesk{
 		FleetURL:  "http://example.com",
 		Datastore: ds,
-		Log:       kitlog.NewNopLogger(),
+		Log:       slog.New(slog.DiscardHandler),
 		NewClientFunc: func(opts *externalsvc.ZendeskOptions) (ZendeskClient, error) {
 			// keep track of group IDs received in calls to NewClientFunc
 			groupIDs = append(groupIDs, opts.GroupID)

@@ -12,6 +12,7 @@ import (
 	"flag"
 	"fmt"
 	"log"
+	"log/slog"
 	"os"

 	"github.com/fleetdm/fleet/v4/server/contexts/license"
@@ -19,7 +20,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mock"
 	"github.com/fleetdm/fleet/v4/server/service/externalsvc"
 	"github.com/fleetdm/fleet/v4/server/worker"
-	kitlog "github.com/go-kit/log"
 )

 func main() {
@@ -77,7 +77,7 @@ func main() {
 		os.Exit(1)
 	}

-	logger := kitlog.NewLogfmtLogger(os.Stdout)
+	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

 	ds := new(mock.Store)
 	ds.HostsByCVEFunc = func(ctx context.Context, cve string) ([]fleet.HostVulnerabilitySummary, error) {

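For the CLI testing tools, `kitlog.NewLogfmtLogger(os.Stdout)` maps naturally onto slog's text handler, which also emits key=value pairs. A sketch; the `HandlerOptions` shown is an assumption (not taken from this commit), included only to illustrate surfacing Debug-level records:

package main

import (
	"log/slog"
	"os"
)

func main() {
	// nil options give Info-level key=value output on stdout, the closest
	// analogue of kitlog.NewLogfmtLogger(os.Stdout).
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	logger.Info("tool started", "mode", "dry-run")

	// To also see Debug records (e.g. the queued-job logs above), pass a level.
	verbose := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
	verbose.Debug("debug output enabled")
}
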
@@ -12,6 +12,7 @@ import (
 	"flag"
 	"fmt"
 	"log"
+	"log/slog"
 	"os"

 	"github.com/fleetdm/fleet/v4/server/contexts/license"
@@ -19,7 +20,6 @@ import (
 	"github.com/fleetdm/fleet/v4/server/mock"
 	"github.com/fleetdm/fleet/v4/server/service/externalsvc"
 	"github.com/fleetdm/fleet/v4/server/worker"
-	kitlog "github.com/go-kit/log"
 )

 func main() {
@@ -77,7 +77,7 @@ func main() {
 		os.Exit(1)
 	}

-	logger := kitlog.NewLogfmtLogger(os.Stdout)
+	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

 	ds := new(mock.Store)
 	ds.HostsByCVEFunc = func(ctx context.Context, cve string) ([]fleet.HostVulnerabilitySummary, error) {