mirror of https://github.com/argoproj/argo-cd (synced 2026-04-21 17:07:16 +00:00)

chore: enable early-return from revive (#21423)

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

parent 2a497ef1fd
commit 6c45721730

26 changed files with 291 additions and 317 deletions
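This commit turns on revive's early-return rule in the lint configuration and refactors every call site the rule flags: if/else blocks in which one branch only exits (return, break, continue, or log-and-bail) become guard clauses, so the happy path reads straight down with less nesting. The hunks below apply that single transformation across the appset controller, CLI, application controller, repo server, API server, and util packages.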
@@ -105,6 +105,11 @@ linters-settings:
       # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
       - name: duplicated-imports
         disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
+      - name: early-return
+        disabled: false
+        arguments:
+          - "preserveScope"
       # Empty blocks make code less readable and could be a symptom of a bug or unfinished refactoring.
       - name: empty-block
         disabled: true
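For readers unfamiliar with the rule: early-return (see the RULES_DESCRIPTIONS link above) flags `if cond { A } else { B }` where one branch ends in a terminating statement, and asks for the terminating branch to come first as a guard clause; the "preserveScope" argument tells revive to skip suggestions that would move a declaration into a wider scope. A minimal, self-contained sketch of the transformation follows (the lookup helper is hypothetical, not code from this commit):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// Before (the shape early-return flags):
//
//	if role, ok := roles[name]; ok {
//		return role, nil
//	} else {
//		return "", errNotFound
//	}

// After: the failing branch exits first, the happy path stays unindented.
func lookup(roles map[string]string, name string) (string, error) {
	role, ok := roles[name]
	if !ok {
		return "", errNotFound
	}
	return role, nil
}

func main() {
	roles := map[string]string{"alice": "admin"}
	fmt.Println(lookup(roles, "alice"))
}

Assuming this block lives in the repository's .golangci.yaml, a plain `golangci-lint run` would surface any remaining violations once the rule is enabled.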
@@ -479,12 +479,11 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
 	errorsByIndex := map[int]error{}
 	namesSet := map[string]bool{}
 	for i, app := range desiredApplications {
-		if !namesSet[app.Name] {
-			namesSet[app.Name] = true
-		} else {
+		if namesSet[app.Name] {
 			errorsByIndex[i] = fmt.Errorf("ApplicationSet %s contains applications with duplicate name: %s", applicationSetInfo.Name, app.Name)
 			continue
 		}
+		namesSet[app.Name] = true
 
 		appProject := &argov1alpha1.AppProject{}
 		err := r.Client.Get(ctx, types.NamespacedName{Name: app.Spec.Project, Namespace: r.ArgoCDNamespace}, appProject)
@@ -982,17 +981,16 @@ func (r *ApplicationSetReconciler) buildAppSyncMap(applicationSet argov1alpha1.A
 			}
 
 			appStatus := applicationSet.Status.ApplicationStatus[idx]
 
-			if app, ok := appMap[appName]; ok {
-				syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
-				if !syncEnabled {
-					break
-				}
-			} else {
+			app, ok := appMap[appName]
+			if !ok {
 				// application name not found in the list of applications managed by this ApplicationSet, maybe because it's being deleted
 				syncEnabled = false
 				break
 			}
+			syncEnabled = appSyncEnabledForNextStep(&applicationSet, app, appStatus)
+			if !syncEnabled {
+				break
+			}
 		}
 	}
 
@@ -168,60 +168,59 @@ func (g *DuckTypeGenerator) GenerateParams(appSetGenerator *argoprojiov1alpha1.A
 	// Read this outside the loop to improve performance
 	argoClusters := clustersFromArgoCD.Items
 
-	if len(clusterDecisions) > 0 {
-		for _, cluster := range clusterDecisions {
-			// generated instance of cluster params
-			params := map[string]any{}
-
-			log.Infof("cluster: %v", cluster)
-			matchValue := cluster.(map[string]any)[matchKey]
-			if matchValue == nil || matchValue.(string) == "" {
-				log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]any))
-				continue
-			}
-
-			strMatchValue := matchValue.(string)
-			log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD")
-
-			found := false
-
-			for _, argoCluster := range argoClusters {
-				if argoCluster.Name == strMatchValue {
-					log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD")
-					params["name"] = argoCluster.Name
-					params["server"] = argoCluster.Server
-
-					found = true
-					break // Stop looking
-				}
-			}
-
-			if !found {
-				log.WithField(matchKey, strMatchValue).Warning("unmatched cluster in ArgoCD")
-				continue
-			}
-
-			for key, value := range cluster.(map[string]any) {
-				params[key] = value.(string)
-			}
-
-			for key, value := range appSetGenerator.ClusterDecisionResource.Values {
-				if appSet.Spec.GoTemplate {
-					if params["values"] == nil {
-						params["values"] = map[string]string{}
-					}
-					params["values"].(map[string]string)[key] = value
-				} else {
-					params["values."+key] = value
-				}
-			}
-
-			res = append(res, params)
-		}
-	} else {
+	if len(clusterDecisions) == 0 {
 		log.Warningf("clusterDecisionResource status.%s missing", statusListKey)
 		return nil, nil
 	}
+	for _, cluster := range clusterDecisions {
+		// generated instance of cluster params
+		params := map[string]any{}
+
+		log.Infof("cluster: %v", cluster)
+		matchValue := cluster.(map[string]any)[matchKey]
+		if matchValue == nil || matchValue.(string) == "" {
+			log.Warningf("matchKey=%v not found in \"%v\" list: %v\n", matchKey, statusListKey, cluster.(map[string]any))
+			continue
+		}
+
+		strMatchValue := matchValue.(string)
+		log.WithField(matchKey, strMatchValue).Debug("validate against ArgoCD")
+
+		found := false
+
+		for _, argoCluster := range argoClusters {
+			if argoCluster.Name == strMatchValue {
+				log.WithField(matchKey, argoCluster.Name).Info("matched cluster in ArgoCD")
+				params["name"] = argoCluster.Name
+				params["server"] = argoCluster.Server
+
+				found = true
+				break // Stop looking
+			}
+		}
+
+		if !found {
+			log.WithField(matchKey, strMatchValue).Warning("unmatched cluster in ArgoCD")
+			continue
+		}
+
+		for key, value := range cluster.(map[string]any) {
+			params[key] = value.(string)
+		}
+
+		for key, value := range appSetGenerator.ClusterDecisionResource.Values {
+			if appSet.Spec.GoTemplate {
+				if params["values"] == nil {
+					params["values"] = map[string]string{}
+				}
+				params["values"].(map[string]string)[key] = value
+			} else {
+				params["values."+key] = value
+			}
+		}
+
+		res = append(res, params)
+	}
 
 	return res, nil
 }
@@ -577,11 +577,10 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 					}
 					log.Fatalf("stream read failed: %v", err)
 				}
-				if !msg.GetLast() {
-					fmt.Println(msg.GetContent())
-				} else {
+				if msg.GetLast() {
 					return
 				}
+				fmt.Println(msg.GetContent())
 			} // Done with receive message
 		} // Done with retry
 	},
@@ -2236,18 +2235,17 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
 
 				proj := getProject(ctx, c, clientOpts, app.Spec.Project)
 				foundDiffs = findandPrintDiff(ctx, app, proj.Project, resources, argoSettings, diffOption, ignoreNormalizerOpts)
-				if foundDiffs {
-					if !diffChangesConfirm {
-						yesno := cli.AskToProceed(fmt.Sprintf("Please review changes to application %s shown above. Do you want to continue the sync process? (y/n): ", appQualifiedName))
-						if !yesno {
-							os.Exit(0)
-						}
-					}
-				} else {
+				if !foundDiffs {
 					fmt.Printf("====== No Differences found ======\n")
 					// if no differences found, then no need to sync
 					return
 				}
+				if !diffChangesConfirm {
+					yesno := cli.AskToProceed(fmt.Sprintf("Please review changes to application %s shown above. Do you want to continue the sync process? (y/n): ", appQualifiedName))
+					if !yesno {
+						os.Exit(0)
+					}
+				}
 				_, err = appIf.Sync(ctx, &syncReq)
 				errors.CheckError(err)
@@ -1205,12 +1205,11 @@ func NewProjectRemoveDestinationServiceAccountCommand(clientOpts *argocdclient.C
 					destServiceAccount.DefaultServiceAccount == serviceAccount
 				},
 			)
-			if originalLength != len(proj.Spec.DestinationServiceAccounts) {
-				_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
-				errors.CheckError(err)
-			} else {
+			if originalLength == len(proj.Spec.DestinationServiceAccounts) {
 				log.Fatal("Specified destination service account does not exist in project")
 			}
+			_, err = projIf.Update(ctx, &projectpkg.ProjectUpdateRequest{Project: proj})
+			errors.CheckError(err)
 		},
 	}
 
@@ -275,11 +275,10 @@ func NewApplicationController(
 	applicationControllerName := env.StringFromEnv(common.EnvAppControllerName, common.DefaultApplicationControllerName)
 	appControllerDeployment, err := deploymentInformer.Lister().Deployments(settingsMgr.GetNamespace()).Get(applicationControllerName)
 	if err != nil {
-		if apierrors.IsNotFound(err) {
-			appControllerDeployment = nil
-		} else {
+		if !apierrors.IsNotFound(err) {
 			return fmt.Errorf("error retrieving Application Controller Deployment: %w", err)
 		}
+		appControllerDeployment = nil
 	}
 	if appControllerDeployment != nil {
 		if appControllerDeployment.Spec.Replicas != nil && int(*appControllerDeployment.Spec.Replicas) <= 0 {
@@ -1649,9 +1648,8 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
 
 	if comparisonLevel == ComparisonWithNothing {
 		managedResources := make([]*appv1.ResourceDiff, 0)
-		if err := ctrl.cache.GetAppManagedResources(app.InstanceName(ctrl.namespace), &managedResources); err != nil {
-			logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, fall back to full reconciliation")
-		} else {
+		err := ctrl.cache.GetAppManagedResources(app.InstanceName(ctrl.namespace), &managedResources)
+		if err == nil {
 			var tree *appv1.ApplicationTree
 			if tree, err = ctrl.getResourceTree(app, managedResources); err == nil {
 				app.Status.Summary = tree.GetSummary(app)
@@ -1664,6 +1662,7 @@
 			patchMs = ctrl.persistAppStatus(origApp, &app.Status)
 			return
 		}
+		logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, fall back to full reconciliation")
 	}
 	ts.AddCheckpoint("comparison_with_nothing_ms")
 
@@ -2117,22 +2116,22 @@ func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *
 		logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
 		return nil, 0
 	} else if alreadyAttempted && selfHeal {
-		if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
-			op.Sync.SelfHealAttemptsCount++
-			for _, resource := range resources {
-				if resource.Status != appv1.SyncStatusCodeSynced {
-					op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
-						Kind:  resource.Kind,
-						Group: resource.Group,
-						Name:  resource.Name,
-					})
-				}
-			}
-		} else {
+		shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app)
+		if !shouldSelfHeal {
 			logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
 			ctrl.requestAppRefresh(app.QualifiedName(), CompareWithLatest.Pointer(), &retryAfter)
 			return nil, 0
 		}
+		op.Sync.SelfHealAttemptsCount++
+		for _, resource := range resources {
+			if resource.Status != appv1.SyncStatusCodeSynced {
+				op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
+					Kind:  resource.Kind,
+					Group: resource.Group,
+					Name:  resource.Name,
+				})
+			}
+		}
 	}
 	ts.AddCheckpoint("already_attempted_check_ms")
 
controller/cache/cache.go (vendored, 5 changes)

@@ -329,12 +329,11 @@ func ownerRefGV(ownerRef metav1.OwnerReference) schema.GroupVersion {
 }
 
 func getAppRecursive(r *clustercache.Resource, ns map[kube.ResourceKey]*clustercache.Resource, visited map[kube.ResourceKey]bool) (string, bool) {
-	if !visited[r.ResourceKey()] {
-		visited[r.ResourceKey()] = true
-	} else {
+	if visited[r.ResourceKey()] {
 		log.Warnf("Circular dependency detected: %v.", visited)
 		return resInfo(r).AppName, false
 	}
+	visited[r.ResourceKey()] = true
 
 	if resInfo(r).AppName != "" {
 		return resInfo(r).AppName, true
@@ -138,15 +138,14 @@ func (c *Consistent) GetLeast(client string) (string, error) {
 			foundItem = c.clients.Min()
 		}
 		key := c.clients.Get(foundItem)
-		if key != nil {
-			host := c.servers[key.(item).value]
-			if c.loadOK(host) {
-				return host, nil
-			}
-			h = key.(item).value
-		} else {
+		if key == nil {
 			return client, nil
 		}
+		host := c.servers[key.(item).value]
+		if c.loadOK(host) {
+			return host, nil
+		}
+		h = key.(item).value
 	}
 }
 
@@ -464,11 +464,10 @@ func GetClusterSharding(kubeClient kubernetes.Interface, settingsMgr *settings.S
 			return nil, fmt.Errorf("(dynamic cluster distribution) failed to get app controller deployment: %w", err)
 		}
 
-		if appControllerDeployment != nil && appControllerDeployment.Spec.Replicas != nil {
-			replicasCount = int(*appControllerDeployment.Spec.Replicas)
-		} else {
+		if appControllerDeployment == nil || appControllerDeployment.Spec.Replicas == nil {
 			return nil, stderrors.New("(dynamic cluster distribution) failed to get app controller deployment replica count")
 		}
+		replicasCount = int(*appControllerDeployment.Spec.Replicas)
 	} else {
 		replicasCount = env.ParseNumFromEnv(common.EnvControllerReplicas, 0, 0, math.MaxInt32)
 	}
@@ -2294,15 +2294,14 @@ func (s *Service) GetRevisionMetadata(_ context.Context, q *apiclient.RepoServer
 		// and re-generate the meta data. Otherwise, if there is signature info
 		// in the metadata, but none was requested, we remove it from the data
 		// that we return.
-		if q.CheckSignature && metadata.SignatureInfo == "" {
-			log.Infof("revision metadata cache hit, but need to regenerate due to missing signature info: %s/%s", q.Repo.Repo, q.Revision)
-		} else {
+		if !q.CheckSignature || metadata.SignatureInfo != "" {
 			log.Infof("revision metadata cache hit: %s/%s", q.Repo.Repo, q.Revision)
 			if !q.CheckSignature {
 				metadata.SignatureInfo = ""
 			}
 			return metadata, nil
 		}
+		log.Infof("revision metadata cache hit, but need to regenerate due to missing signature info: %s/%s", q.Repo.Repo, q.Revision)
 	} else {
 		if !errors.Is(err, cache.ErrCacheMiss) {
 			log.Warnf("revision metadata cache error %s/%s: %v", q.Repo.Repo, q.Revision, err)
@@ -108,37 +108,35 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	reqNs := ""
 	if ns, ok := r.URL.Query()["namespace"]; ok && enabled {
-		if argo.IsValidNamespaceName(ns[0]) {
-			if security.IsNamespaceEnabled(ns[0], h.namespace, h.enabledNamespaces) {
-				reqNs = ns[0]
-			} else {
-				notFound = true
-			}
-		} else {
+		if !argo.IsValidNamespaceName(ns[0]) {
 			w.WriteHeader(http.StatusBadRequest)
 			return
 		}
+		if security.IsNamespaceEnabled(ns[0], h.namespace, h.enabledNamespaces) {
+			reqNs = ns[0]
+		} else {
+			notFound = true
+		}
 	} else {
 		reqNs = h.namespace
 	}
 
 	// Sample url: http://localhost:8080/api/badge?name=123
 	if name, ok := r.URL.Query()["name"]; ok && enabled && !notFound {
-		if argo.IsValidAppName(name[0]) {
-			if app, err := h.appClientset.ArgoprojV1alpha1().Applications(reqNs).Get(context.Background(), name[0], metav1.GetOptions{}); err == nil {
-				health = app.Status.Health.Status
-				status = app.Status.Sync.Status
-				applicationName = name[0]
-				if app.Status.OperationState != nil && app.Status.OperationState.SyncResult != nil {
-					revision = app.Status.OperationState.SyncResult.Revision
-				}
-			} else if errors.IsNotFound(err) {
-				notFound = true
-			}
-		} else {
+		if !argo.IsValidAppName(name[0]) {
 			w.WriteHeader(http.StatusBadRequest)
 			return
 		}
+		if app, err := h.appClientset.ArgoprojV1alpha1().Applications(reqNs).Get(context.Background(), name[0], metav1.GetOptions{}); err == nil {
+			health = app.Status.Health.Status
+			status = app.Status.Sync.Status
+			applicationName = name[0]
+			if app.Status.OperationState != nil && app.Status.OperationState.SyncResult != nil {
+				revision = app.Status.OperationState.SyncResult.Revision
+			}
+		} else if errors.IsNotFound(err) {
+			notFound = true
+		}
 	}
 	// Sample url: http://localhost:8080/api/badge?project=default
 	if projects, ok := r.URL.Query()["project"]; ok && enabled && !notFound {
@@ -159,23 +159,22 @@ func (s *Server) Create(ctx context.Context, q *cluster.ClusterCreateRequest) (*
 
 	clust, err := s.db.CreateCluster(ctx, c)
 	if err != nil {
-		if status.Convert(err).Code() == codes.AlreadyExists {
-			// act idempotent if existing spec matches new spec
-			existing, getErr := s.db.GetCluster(ctx, c.Server)
-			if getErr != nil {
-				return nil, status.Errorf(codes.Internal, "unable to check existing cluster details: %v", getErr)
-			}
-
-			if existing.Equals(c) {
-				clust = existing
-			} else if q.Upsert {
-				return s.Update(ctx, &cluster.ClusterUpdateRequest{Cluster: c})
-			} else {
-				return nil, status.Error(codes.InvalidArgument, argo.GenerateSpecIsDifferentErrorMessage("cluster", existing, c))
-			}
-		} else {
+		if status.Convert(err).Code() != codes.AlreadyExists {
 			return nil, fmt.Errorf("error creating cluster: %w", err)
 		}
+		// act idempotent if existing spec matches new spec
+		existing, getErr := s.db.GetCluster(ctx, c.Server)
+		if getErr != nil {
+			return nil, status.Errorf(codes.Internal, "unable to check existing cluster details: %v", getErr)
+		}
+
+		if existing.Equals(c) {
+			clust = existing
+		} else if q.Upsert {
+			return s.Update(ctx, &cluster.ClusterUpdateRequest{Cluster: c})
+		} else {
+			return nil, status.Error(codes.InvalidArgument, argo.GenerateSpecIsDifferentErrorMessage("cluster", existing, c))
+		}
 	}
 
 	err = s.cache.SetClusterInfo(c.Server, &appv1.ClusterInfo{
@@ -265,18 +265,17 @@ func (s *Server) Create(ctx context.Context, q *project.ProjectCreateRequest) (*
 		if getErr != nil {
 			return nil, status.Errorf(codes.Internal, "unable to check existing project details: %v", getErr)
 		}
-		if q.GetUpsert() {
-			if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceProjects, rbacpolicy.ActionUpdate, q.GetProject().Name); err != nil {
-				return nil, err
-			}
-			existing.Spec = q.GetProject().Spec
-			res, err = s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(ctx, existing, metav1.UpdateOptions{})
-		} else {
+		if !q.GetUpsert() {
 			if !reflect.DeepEqual(existing.Spec, q.GetProject().Spec) {
 				return nil, status.Error(codes.InvalidArgument, argo.GenerateSpecIsDifferentErrorMessage("project", existing.Spec, q.GetProject().Spec))
 			}
 			return existing, nil
 		}
+		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceProjects, rbacpolicy.ActionUpdate, q.GetProject().Name); err != nil {
+			return nil, err
+		}
+		existing.Spec = q.GetProject().Spec
+		res, err = s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(ctx, existing, metav1.UpdateOptions{})
 	}
 	if err == nil {
 		s.logEvent(ctx, res, argo.EventReasonResourceCreated, "created project")
@@ -533,27 +532,26 @@ func (s *Server) NormalizeProjs() error {
 	}
 	for _, proj := range projList.Items {
 		for i := 0; i < 3; i++ {
-			if proj.NormalizeJWTTokens() {
-				_, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(context.Background(), &proj, metav1.UpdateOptions{})
-				if err == nil {
-					log.Infof("Successfully normalized project %s.", proj.Name)
-					break
-				}
-				if !apierrors.IsConflict(err) {
-					log.Warnf("Failed normalize project %s", proj.Name)
-					break
-				}
-				projGet, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Get(context.Background(), proj.Name, metav1.GetOptions{})
-				if err != nil {
-					return status.Errorf(codes.Internal, "Error retrieving project: %s", err.Error())
-				}
-				proj = *projGet
-				if i == 2 {
-					return status.Errorf(codes.Internal, "Failed normalize project %s", proj.Name)
-				}
-			} else {
+			if !proj.NormalizeJWTTokens() {
 				break
 			}
+			_, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(context.Background(), &proj, metav1.UpdateOptions{})
+			if err == nil {
+				log.Infof("Successfully normalized project %s.", proj.Name)
+				break
+			}
+			if !apierrors.IsConflict(err) {
+				log.Warnf("Failed normalize project %s", proj.Name)
+				break
+			}
+			projGet, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Get(context.Background(), proj.Name, metav1.GetOptions{})
+			if err != nil {
+				return status.Errorf(codes.Internal, "Error retrieving project: %s", err.Error())
+			}
+			proj = *projGet
+			if i == 2 {
+				return status.Errorf(codes.Internal, "Failed normalize project %s", proj.Name)
+			}
 		}
 	}
 	return nil
@@ -494,12 +494,11 @@ func (a *Actions) Update(toUpdate func(*v1alpha1.ApplicationSet)) *Actions {
 
 		_, err = appSetClientSet.Update(context.Background(), utils.MustToUnstructured(&appSet), metav1.UpdateOptions{})
 
-		if err != nil {
-			mostRecentError = err
-		} else {
+		if err == nil {
 			mostRecentError = nil
 			break
 		}
+		mostRecentError = err
 	}
 
@@ -312,14 +312,13 @@ func waitForSuccess(condition func() error, expireTime time.Time) error {
 		}
 
 		conditionErr := condition()
-		if conditionErr != nil {
-			// Fail!
-			mostRecentError = conditionErr
-		} else {
+		if conditionErr == nil {
 			// Pass!
 			mostRecentError = nil
 			break
 		}
+		// Fail!
+		mostRecentError = conditionErr
 
 		// Wait on fail
 		if sleepIntervalsIdx < len(sleepIntervals)-1 {
@@ -243,14 +243,13 @@ func RefreshApp(appIf v1alpha1.ApplicationInterface, name string, refreshType ar
 	}
 	for attempt := 0; attempt < 5; attempt++ {
 		app, err := appIf.Patch(context.Background(), name, types.MergePatchType, patch, metav1.PatchOptions{})
-		if err != nil {
-			if !apierrors.IsConflict(err) {
-				return nil, fmt.Errorf("error patching annotations in application %q: %w", name, err)
-			}
-		} else {
+		if err == nil {
 			log.Infof("Requested app '%s' refresh", name)
 			return app, nil
 		}
+		if !apierrors.IsConflict(err) {
+			return nil, fmt.Errorf("error patching annotations in application %q: %w", name, err)
+		}
 		time.Sleep(100 * time.Millisecond)
 	}
 	return nil, err
@@ -656,14 +655,14 @@ func ValidatePermissions(ctx context.Context, spec *argoappv1.ApplicationSpec, p
 		// Ensure the k8s cluster the app is referencing, is configured in Argo CD
 		_, err = db.GetCluster(ctx, spec.Destination.Server)
 		if err != nil {
-			if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.NotFound {
-				conditions = append(conditions, argoappv1.ApplicationCondition{
-					Type:    argoappv1.ApplicationConditionInvalidSpecError,
-					Message: fmt.Sprintf("cluster '%s' has not been configured", spec.Destination.Server),
-				})
-			} else {
+			errStatus, ok := status.FromError(err)
+			if !(ok && errStatus.Code() == codes.NotFound) {
 				return nil, fmt.Errorf("error getting cluster: %w", err)
 			}
+			conditions = append(conditions, argoappv1.ApplicationCondition{
+				Type:    argoappv1.ApplicationConditionInvalidSpecError,
+				Message: fmt.Sprintf("cluster '%s' has not been configured", spec.Destination.Server),
+			})
 		}
 	} else if spec.Destination.Server == "" {
 		conditions = append(conditions, argoappv1.ApplicationCondition{Type: argoappv1.ApplicationConditionInvalidSpecError, Message: errDestinationMissing})
@@ -69,18 +69,18 @@ func (db *db) ListConfiguredGPGPublicKeys(_ context.Context) (map[string]*appsv1
 	// This is not optimal, but the executil from argo-pkg does not support writing to
 	// stdin of the forked process. So for now, we must live with that.
 	for k, p := range keysCM.Data {
-		if expectedKeyID := gpg.KeyID(k); expectedKeyID != "" {
-			parsedKey, err := validatePGPKey(p)
-			if err != nil {
-				return nil, fmt.Errorf("Could not parse GPG key for entry '%s': %s", expectedKeyID, err.Error())
-			}
-			if expectedKeyID != parsedKey.KeyID {
-				return nil, fmt.Errorf("Key parsed for entry with key ID '%s' had different key ID '%s'", expectedKeyID, parsedKey.KeyID)
-			}
-			result[parsedKey.KeyID] = parsedKey
-		} else {
+		expectedKeyID := gpg.KeyID(k)
+		if expectedKeyID == "" {
 			return nil, fmt.Errorf("Found entry with key '%s' in ConfigMap, but this is not a valid PGP key ID", k)
 		}
+		parsedKey, err := validatePGPKey(p)
+		if err != nil {
+			return nil, fmt.Errorf("Could not parse GPG key for entry '%s': %s", expectedKeyID, err.Error())
+		}
+		if expectedKeyID != parsedKey.KeyID {
+			return nil, fmt.Errorf("Key parsed for entry with key ID '%s' had different key ID '%s'", expectedKeyID, parsedKey.KeyID)
+		}
+		result[parsedKey.KeyID] = parsedKey
 	}
 
 	return result, nil
@@ -528,14 +528,13 @@ func (db *db) legacyRepoBackend() repositoryBackend {
 func (db *db) enrichCredsToRepo(ctx context.Context, repository *v1alpha1.Repository) error {
 	if !repository.HasCredentials() {
 		creds, err := db.GetRepositoryCredentials(ctx, repository.Repo)
-		if err == nil {
-			if creds != nil {
-				repository.CopyCredentialsFrom(creds)
-				repository.InheritedCreds = true
-			}
-		} else {
+		if err != nil {
 			return fmt.Errorf("failed to get repository credentials for %q: %w", repository.Repo, err)
 		}
+		if creds != nil {
+			repository.CopyCredentialsFrom(creds)
+			repository.InheritedCreds = true
+		}
 	} else {
 		log.Debugf("%s has credentials", repository.Repo)
 	}
@@ -52,18 +52,17 @@ func (l *legacyRepositoryBackend) ListRepositories(_ context.Context, repoType *
 		if repoType == nil || *repoType == inRepo.Type {
 			r, err := l.tryGetRepository(inRepo.URL)
 			if err != nil {
-				if r != nil && errors.IsCredentialsConfigurationError(err) {
-					modifiedTime := metav1.Now()
-					r.ConnectionState = appsv1.ConnectionState{
-						Status:     appsv1.ConnectionStatusFailed,
-						Message:    "Configuration error - please check the server logs",
-						ModifiedAt: &modifiedTime,
-					}
-
-					log.Warnf("could not retrieve repo: %s", err.Error())
-				} else {
+				if r == nil || !errors.IsCredentialsConfigurationError(err) {
 					return nil, err
 				}
+				modifiedTime := metav1.Now()
+				r.ConnectionState = appsv1.ConnectionState{
+					Status:     appsv1.ConnectionStatusFailed,
+					Message:    "Configuration error - please check the server logs",
+					ModifiedAt: &modifiedTime,
+				}
+
+				log.Warnf("could not retrieve repo: %s", err.Error())
 			}
 			repos = append(repos, r)
 		}
@@ -112,18 +112,17 @@ func (s *secretsRepositoryBackend) ListRepositories(_ context.Context, repoType
 	for _, secret := range secrets {
 		r, err := secretToRepository(secret)
 		if err != nil {
-			if r != nil {
-				modifiedTime := metav1.Now()
-				r.ConnectionState = appsv1.ConnectionState{
-					Status:     appsv1.ConnectionStatusFailed,
-					Message:    "Configuration error - please check the server logs",
-					ModifiedAt: &modifiedTime,
-				}
-
-				log.Warnf("Error while parsing repository secret '%s': %v", secret.Name, err)
-			} else {
+			if r == nil {
 				return nil, err
 			}
+			modifiedTime := metav1.Now()
+			r.ConnectionState = appsv1.ConnectionState{
+				Status:     appsv1.ConnectionStatusFailed,
+				Message:    "Configuration error - please check the server logs",
+				ModifiedAt: &modifiedTime,
+			}
+
+			log.Warnf("Error while parsing repository secret '%s': %v", secret.Name, err)
 		}
 
 		if repoType == nil || *repoType == r.Type {
@@ -480,15 +480,15 @@ func (m *nativeGitClient) Checkout(revision string, submoduleEnabled bool) (stri
 	// We must populate LFS content by using lfs checkout, if we have at least
 	// one LFS reference in the current revision.
 	if m.IsLFSEnabled() {
-		if largeFiles, err := m.LsLargeFiles(); err == nil {
-			if len(largeFiles) > 0 {
-				if out, err := m.runCmd("lfs", "checkout"); err != nil {
-					return out, fmt.Errorf("failed to checkout LFS files: %w", err)
-				}
-			}
-		} else {
+		largeFiles, err := m.LsLargeFiles()
+		if err != nil {
 			return "", fmt.Errorf("failed to list LFS files: %w", err)
 		}
+		if len(largeFiles) > 0 {
+			if out, err := m.runCmd("lfs", "checkout"); err != nil {
+				return out, fmt.Errorf("failed to checkout LFS files: %w", err)
+			}
+		}
 	}
 	if _, err := os.Stat(m.root + "/.gitmodules"); !os.IsNotExist(err) {
 		if submoduleEnabled {
@@ -852,14 +852,13 @@ func (m *nativeGitClient) CheckoutOrOrphan(branch string, submoduleEnabled bool)
 	out, err := m.Checkout(branch, submoduleEnabled)
 	if err != nil {
 		// If the branch doesn't exist, create it as an orphan branch.
-		if strings.Contains(err.Error(), "did not match any file(s) known to git") {
-			out, err = m.runCmd("switch", "--orphan", branch)
-			if err != nil {
-				return out, fmt.Errorf("failed to create orphan branch: %w", err)
-			}
-		} else {
+		if !strings.Contains(err.Error(), "did not match any file(s) known to git") {
 			return out, fmt.Errorf("failed to checkout branch: %w", err)
 		}
+		out, err = m.runCmd("switch", "--orphan", branch)
+		if err != nil {
+			return out, fmt.Errorf("failed to create orphan branch: %w", err)
+		}
 
 		// Make an empty initial commit.
 		out, err = m.runCmd("commit", "--allow-empty", "-m", "Initial commit")
@@ -881,21 +880,20 @@
 func (m *nativeGitClient) CheckoutOrNew(branch, base string, submoduleEnabled bool) (string, error) {
 	out, err := m.Checkout(branch, submoduleEnabled)
 	if err != nil {
-		if strings.Contains(err.Error(), "did not match any file(s) known to git") {
-			// If the branch does not exist, create any empty branch based on the sync branch
-			// First, checkout the sync branch.
-			out, err = m.Checkout(base, submoduleEnabled)
-			if err != nil {
-				return out, fmt.Errorf("failed to checkout sync branch: %w", err)
-			}
-
-			out, err = m.runCmd("checkout", "-b", branch)
-			if err != nil {
-				return out, fmt.Errorf("failed to create branch: %w", err)
-			}
-		} else {
+		if !strings.Contains(err.Error(), "did not match any file(s) known to git") {
 			return out, fmt.Errorf("failed to checkout branch: %w", err)
 		}
+		// If the branch does not exist, create any empty branch based on the sync branch
+		// First, checkout the sync branch.
+		out, err = m.Checkout(base, submoduleEnabled)
+		if err != nil {
+			return out, fmt.Errorf("failed to checkout sync branch: %w", err)
+		}
+
+		out, err = m.runCmd("checkout", "-b", branch)
+		if err != nil {
+			return out, fmt.Errorf("failed to create branch: %w", err)
+		}
 	}
 	return "", nil
 }
@@ -191,20 +191,19 @@ func (creds HTTPSCreds) Environ() (io.Closer, []string, error) {
 	// another for storing the key. If we fail to create second fail, the first
 	// must be removed.
 	certFile, err := os.CreateTemp(argoio.TempDir, "")
-	if err == nil {
-		defer certFile.Close()
-		keyFile, err = os.CreateTemp(argoio.TempDir, "")
-		if err != nil {
-			removeErr := os.Remove(certFile.Name())
-			if removeErr != nil {
-				log.Errorf("Could not remove previously created tempfile %s: %v", certFile.Name(), removeErr)
-			}
-			return NopCloser{}, nil, err
-		}
-		defer keyFile.Close()
-	} else {
+	if err != nil {
 		return NopCloser{}, nil, err
 	}
+	defer certFile.Close()
+	keyFile, err = os.CreateTemp(argoio.TempDir, "")
+	if err != nil {
+		removeErr := os.Remove(certFile.Name())
+		if removeErr != nil {
+			log.Errorf("Could not remove previously created tempfile %s: %v", certFile.Name(), removeErr)
+		}
+		return NopCloser{}, nil, err
+	}
+	defer keyFile.Close()
 
 	// We should have both temp files by now
 	httpCloser = authFilePaths([]string{certFile.Name(), keyFile.Name()})
@@ -401,20 +400,19 @@ func (g GitHubAppCreds) Environ() (io.Closer, []string, error) {
 	// another for storing the key. If we fail to create second fail, the first
 	// must be removed.
 	certFile, err := os.CreateTemp(argoio.TempDir, "")
-	if err == nil {
-		defer certFile.Close()
-		keyFile, err = os.CreateTemp(argoio.TempDir, "")
-		if err != nil {
-			removeErr := os.Remove(certFile.Name())
-			if removeErr != nil {
-				log.Errorf("Could not remove previously created tempfile %s: %v", certFile.Name(), removeErr)
-			}
-			return NopCloser{}, nil, err
-		}
-		defer keyFile.Close()
-	} else {
+	if err != nil {
 		return NopCloser{}, nil, err
 	}
+	defer certFile.Close()
+	keyFile, err = os.CreateTemp(argoio.TempDir, "")
+	if err != nil {
+		removeErr := os.Remove(certFile.Name())
+		if removeErr != nil {
+			log.Errorf("Could not remove previously created tempfile %s: %v", certFile.Name(), removeErr)
+		}
+		return NopCloser{}, nil, err
+	}
+	defer keyFile.Close()
 
 	// We should have both temp files by now
 	httpCloser = authFilePaths([]string{certFile.Name(), keyFile.Name()})
@@ -436,11 +436,10 @@ func cleanupChartLockFile(chartPath string) (func(), error) {
 	exists := true
 	lockPath := path.Join(chartPath, "Chart.lock")
 	if _, err := os.Stat(lockPath); err != nil {
-		if os.IsNotExist(err) {
-			exists = false
-		} else {
+		if !os.IsNotExist(err) {
 			return nil, fmt.Errorf("failed to check lock file status: %w", err)
 		}
+		exists = false
 	}
 	return func() {
 		if !exists {
@@ -303,41 +303,40 @@ func (vm VM) ExecuteResourceActionDiscovery(obj *unstructured.Unstructured, scri
 			return nil, err
 		}
 		returnValue := l.Get(-1)
-		if returnValue.Type() == lua.LTTable {
-			jsonBytes, err := luajson.Encode(returnValue)
-			if err != nil {
-				return nil, fmt.Errorf("error in converting to lua table: %w", err)
-			}
-			if noAvailableActions(jsonBytes) {
-				continue
-			}
-			actionsMap := make(map[string]any)
-			err = json.Unmarshal(jsonBytes, &actionsMap)
-			if err != nil {
-				return nil, fmt.Errorf("error unmarshaling action table: %w", err)
-			}
-			for key, value := range actionsMap {
-				resourceAction := appv1.ResourceAction{Name: key, Disabled: isActionDisabled(value)}
-				if _, exist := availableActionsMap[key]; exist {
-					continue
-				}
-				if emptyResourceActionFromLua(value) {
-					availableActionsMap[key] = resourceAction
-					continue
-				}
-				resourceActionBytes, err := json.Marshal(value)
-				if err != nil {
-					return nil, fmt.Errorf("error marshaling resource action: %w", err)
-				}
-
-				err = json.Unmarshal(resourceActionBytes, &resourceAction)
-				if err != nil {
-					return nil, fmt.Errorf("error unmarshaling resource action: %w", err)
-				}
-				availableActionsMap[key] = resourceAction
-			}
-		} else {
-			return nil, fmt.Errorf(incorrectReturnType, "table", returnValue.Type().String())
+		if returnValue.Type() != lua.LTTable {
+			return nil, fmt.Errorf(incorrectReturnType, "table", returnValue.Type().String())
+		}
+		jsonBytes, err := luajson.Encode(returnValue)
+		if err != nil {
+			return nil, fmt.Errorf("error in converting to lua table: %w", err)
+		}
+		if noAvailableActions(jsonBytes) {
+			continue
+		}
+		actionsMap := make(map[string]any)
+		err = json.Unmarshal(jsonBytes, &actionsMap)
+		if err != nil {
+			return nil, fmt.Errorf("error unmarshaling action table: %w", err)
+		}
+		for key, value := range actionsMap {
+			resourceAction := appv1.ResourceAction{Name: key, Disabled: isActionDisabled(value)}
+			if _, exist := availableActionsMap[key]; exist {
+				continue
+			}
+			if emptyResourceActionFromLua(value) {
+				availableActionsMap[key] = resourceAction
+				continue
+			}
+			resourceActionBytes, err := json.Marshal(value)
+			if err != nil {
+				return nil, fmt.Errorf("error marshaling resource action: %w", err)
+			}
+
+			err = json.Unmarshal(resourceActionBytes, &resourceAction)
+			if err != nil {
+				return nil, fmt.Errorf("error unmarshaling resource action: %w", err)
+			}
+			availableActionsMap[key] = resourceAction
 		}
 	}
 
@@ -387,11 +386,10 @@ func (vm VM) GetResourceActionDiscovery(obj *unstructured.Unstructured) ([]strin
 		return nil, err
 	}
 	// Append the action discovery Lua script if built-in actions are to be included
-	if actions.MergeBuiltinActions {
-		discoveryScripts = append(discoveryScripts, actions.ActionDiscoveryLua)
-	} else {
+	if !actions.MergeBuiltinActions {
 		return []string{actions.ActionDiscoveryLua}, nil
 	}
-	}
+	discoveryScripts = append(discoveryScripts, actions.ActionDiscoveryLua)
 
 	// Fetch predefined Lua scripts
@@ -564,12 +564,11 @@ func (a *ClientApp) GetUserInfo(actualClaims jwt.MapClaims, issuerURL, userInfoP
 		log.Errorf("decrypting the cached claims failed (sub=%s): %s", sub, err)
 	} else {
 		err = json.Unmarshal(claimsRaw, &claims)
-		if err != nil {
-			log.Errorf("cannot unmarshal cached claims structure: %s", err)
-		} else {
+		if err == nil {
 			// return the cached claims since they are not yet expired, were successfully decrypted and unmarshaled
 			return claims, false, err
 		}
+		log.Errorf("cannot unmarshal cached claims structure: %s", err)
 	}
 
@@ -89,11 +89,10 @@ func getTLSCipherSuitesByString(cipherSuites string) ([]uint16, error) {
 	allowedSuites := make([]uint16, 0)
 	for _, s := range strings.Split(cipherSuites, ":") {
 		id, ok := suiteMap[strings.TrimSpace(s)]
-		if ok {
-			allowedSuites = append(allowedSuites, id)
-		} else {
+		if !ok {
 			return nil, fmt.Errorf("invalid cipher suite specified: %s", s)
 		}
+		allowedSuites = append(allowedSuites, id)
 	}
 	return allowedSuites, nil
 }