mirror of
https://github.com/beclab/Olares
synced 2026-04-21 13:37:46 +00:00
refactor: integrate app service into main repo (#2156)
* refactor: integrate app service into main repo * Delete framework/app-service/LICENSE.md * fix(manifest): remove unused manager deploy file * refactor: change the output dir of CRDs to the standard path --------- Co-authored-by: Peng Peng <billpengpeng@gmail.com>
This commit is contained in:
parent
ba8868d771
commit
af9e1993d1
317 changed files with 51957 additions and 4 deletions
24
.github/workflows/module_appservice_build_main.yaml
vendored
Normal file
24
.github/workflows/module_appservice_build_main.yaml
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
name: Build the main branch
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "module-appservice"
|
||||
pull_request:
|
||||
branches:
|
||||
- "module-appservice"
|
||||
defaults:
|
||||
run:
|
||||
working-directory: framework/app-service
|
||||
jobs:
|
||||
build0-main:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y btrfs-progs libbtrfs-dev
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
- run: make build
|
||||
64
.github/workflows/module_appservice_publish_docker.yaml
vendored
Normal file
64
.github/workflows/module_appservice_publish_docker.yaml
vendored
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
name: Publish app-service to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: framework/app-service
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/app-service:${{ github.event.inputs.tags }}-amd64
|
||||
file: Dockerfile
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/app-service:${{ github.event.inputs.tags }}-arm64
|
||||
file: Dockerfile
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/app-service:${{ github.event.inputs.tags }} --amend beclab/app-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/app-service:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/app-service:${{ github.event.inputs.tags }}
|
||||
65
.github/workflows/module_appservice_publish_imageservice.yaml
vendored
Normal file
65
.github/workflows/module_appservice_publish_imageservice.yaml
vendored
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
name: Publish image-service to Dockerhub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tags:
|
||||
description: 'Release Tags'
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: framework/app-service
|
||||
|
||||
jobs:
|
||||
publish_dockerhub_amd64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push amd64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/image-service:${{ github.event.inputs.tags }}-amd64
|
||||
file: Dockerfile.image
|
||||
platforms: linux/amd64
|
||||
publish_dockerhub_arm64:
|
||||
runs-on: self-hosted
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v3
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
- name: Build and push arm64 Docker image
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
push: true
|
||||
tags: beclab/image-service:${{ github.event.inputs.tags }}-arm64
|
||||
file: Dockerfile.image
|
||||
platforms: linux/arm64
|
||||
|
||||
publish_manifest:
|
||||
needs:
|
||||
- publish_dockerhub_amd64
|
||||
- publish_dockerhub_arm64
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASS }}
|
||||
|
||||
- name: Push manifest
|
||||
run: |
|
||||
docker manifest create beclab/image-service:${{ github.event.inputs.tags }} --amend beclab/image-service:${{ github.event.inputs.tags }}-amd64 --amend beclab/image-service:${{ github.event.inputs.tags }}-arm64
|
||||
docker manifest push beclab/image-service:${{ github.event.inputs.tags }}
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
|
|
@ -37,4 +37,6 @@ docs/.vitepress/dist/
|
|||
docs/.vitepress/cache/
|
||||
node_modules
|
||||
.idea/
|
||||
cli/olares-cli*
|
||||
cli/olares-cli*
|
||||
|
||||
framework/app-service/bin
|
||||
|
|
|
|||
29
framework/app-service/Dockerfile
Normal file
29
framework/app-service/Dockerfile
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
# Build the manager binary
|
||||
FROM golang:1.24.6-bullseye as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod bytetrade.io/web3os/app-service/go.mod
|
||||
COPY go.sum bytetrade.io/web3os/app-service/go.sum
|
||||
|
||||
RUN cd bytetrade.io/web3os/app-service && \
|
||||
go mod download
|
||||
|
||||
# Copy the go source
|
||||
COPY cmd/ bytetrade.io/web3os/app-service/cmd/
|
||||
COPY api/ bytetrade.io/web3os/app-service/api/
|
||||
COPY controllers/ bytetrade.io/web3os/app-service/controllers/
|
||||
COPY pkg/ bytetrade.io/web3os/app-service/pkg/
|
||||
|
||||
# Build
|
||||
RUN cd bytetrade.io/web3os/app-service && \
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w" -a -o app-service cmd/app-service/main.go
|
||||
|
||||
# Use distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
FROM gcr.io/distroless/static:debug
|
||||
WORKDIR /
|
||||
COPY --from=builder /workspace/bytetrade.io/web3os/app-service/app-service .
|
||||
|
||||
ENTRYPOINT ["/app-service"]
|
||||
USER 65532:65532
|
||||
29
framework/app-service/Dockerfile.image
Normal file
29
framework/app-service/Dockerfile.image
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
# Build the manager binary
|
||||
FROM golang:1.24.6-bullseye as builder
|
||||
|
||||
WORKDIR /workspace
|
||||
# Copy the Go Modules manifests
|
||||
COPY go.mod bytetrade.io/web3os/app-service/go.mod
|
||||
COPY go.sum bytetrade.io/web3os/app-service/go.sum
|
||||
|
||||
RUN cd bytetrade.io/web3os/app-service && \
|
||||
go mod download
|
||||
|
||||
# Copy the go source
|
||||
COPY cmd/ bytetrade.io/web3os/app-service/cmd/
|
||||
COPY api/ bytetrade.io/web3os/app-service/api/
|
||||
COPY controllers/ bytetrade.io/web3os/app-service/controllers/
|
||||
COPY pkg/ bytetrade.io/web3os/app-service/pkg/
|
||||
|
||||
# Build
|
||||
RUN cd bytetrade.io/web3os/app-service && \
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w" -a -o image-service cmd/image-service/main.go
|
||||
|
||||
# Use distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
FROM gcr.io/distroless/static:debug
|
||||
WORKDIR /
|
||||
COPY --from=builder /workspace/bytetrade.io/web3os/app-service/image-service .
|
||||
|
||||
ENTRYPOINT ["/image-service"]
|
||||
USER 65532:65532
|
||||
138
framework/app-service/Makefile
Normal file
138
framework/app-service/Makefile
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
|
||||
# Image URL to use all building/pushing image targets
|
||||
IMG ?= controller:latest
|
||||
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
|
||||
ENVTEST_K8S_VERSION = 1.24.2
|
||||
|
||||
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
||||
ifeq (,$(shell go env GOBIN))
|
||||
GOBIN=$(shell go env GOPATH)/bin
|
||||
else
|
||||
GOBIN=$(shell go env GOBIN)
|
||||
endif
|
||||
|
||||
# Setting SHELL to bash allows bash commands to be executed by recipes.
|
||||
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
|
||||
SHELL = /usr/bin/env bash -o pipefail
|
||||
.SHELLFLAGS = -ec
|
||||
|
||||
.PHONY: all
|
||||
all: build
|
||||
|
||||
##@ General
|
||||
|
||||
# The help target prints out all targets with their descriptions organized
|
||||
# beneath their categories. The categories are represented by '##@' and the
|
||||
# target descriptions by '##'. The awk commands is responsible for reading the
|
||||
# entire set of makefiles included in this invocation, looking for lines of the
|
||||
# file as xyz: ## something, and then pretty-format the target and help. Then,
|
||||
# if there's a line with ##@ something, that gets pretty-printed as a category.
|
||||
# More info on the usage of ANSI control characters for terminal formatting:
|
||||
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
|
||||
# More info on the awk command:
|
||||
# http://linuxcommand.org/lc3_adv_awk.php
|
||||
|
||||
.PHONY: help
|
||||
help: ## Display this help.
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||
|
||||
##@ Development
|
||||
|
||||
.PHONY: manifests
|
||||
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
|
||||
$(CONTROLLER_GEN) crd paths="./..." output:crd:artifacts:config=.olares/config/cluster/crds
|
||||
|
||||
.PHONY: generate
|
||||
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
|
||||
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: ## Run go fmt against code.
|
||||
go fmt ./...
|
||||
|
||||
.PHONY: vet
|
||||
vet: ## Run go vet against code.
|
||||
go vet ./...
|
||||
|
||||
.PHONY: test
|
||||
test: manifests generate fmt vet envtest ## Run tests.
|
||||
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
|
||||
|
||||
##@ Build app-service
|
||||
|
||||
.PHONY: build
|
||||
build: generate fmt vet ## Build app-service binary.
|
||||
go build -o bin/app-service ./cmd/app-service/main.go
|
||||
|
||||
.PHONY: run
|
||||
run: manifests generate fmt vet ## Run a controller from your host.
|
||||
go run ./cmd/app-service/main.go
|
||||
|
||||
.PHONY: docker-build
|
||||
docker-build: test ## Build docker image with the manager.
|
||||
docker build -t ${IMG} .
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push: ## Push docker image with the manager.
|
||||
docker push ${IMG}
|
||||
|
||||
|
||||
##@ Deployment
|
||||
|
||||
ifndef ignore-not-found
|
||||
ignore-not-found = false
|
||||
endif
|
||||
|
||||
.PHONY: install
|
||||
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
|
||||
$(KUSTOMIZE) build config/crd | kubectl apply -f -
|
||||
|
||||
.PHONY: uninstall
|
||||
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
||||
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
||||
|
||||
.PHONY: deploy
|
||||
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
|
||||
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
|
||||
$(KUSTOMIZE) build config/default | kubectl apply -f -
|
||||
|
||||
.PHONY: undeploy
|
||||
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
|
||||
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
|
||||
|
||||
.PHONY: update-codegen
|
||||
update-codegen: ## generator clientset code.
|
||||
./hack/update-codegen.sh
|
||||
|
||||
|
||||
##@ Build Dependencies
|
||||
|
||||
## Location to install dependencies to
|
||||
LOCALBIN ?= $(shell pwd)/bin
|
||||
$(LOCALBIN):
|
||||
mkdir -p $(LOCALBIN)
|
||||
|
||||
## Tool Binaries
|
||||
KUSTOMIZE ?= $(LOCALBIN)/kustomize
|
||||
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
|
||||
ENVTEST ?= $(LOCALBIN)/setup-envtest
|
||||
|
||||
## Tool Versions
|
||||
KUSTOMIZE_VERSION ?= v3.8.7
|
||||
CONTROLLER_TOOLS_VERSION ?= v0.19.0
|
||||
|
||||
KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
|
||||
.PHONY: kustomize
|
||||
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
|
||||
$(KUSTOMIZE): $(LOCALBIN)
|
||||
test -s $(LOCALBIN)/kustomize || { curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); }
|
||||
|
||||
.PHONY: controller-gen
|
||||
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
|
||||
$(CONTROLLER_GEN): $(LOCALBIN)
|
||||
test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
|
||||
|
||||
.PHONY: envtest
|
||||
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
|
||||
$(ENVTEST): $(LOCALBIN)
|
||||
test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
|
||||
16
framework/app-service/PROJECT
Normal file
16
framework/app-service/PROJECT
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
domain: bytetrade.io
|
||||
layout:
|
||||
- go.kubebuilder.io/v3
|
||||
projectName: app-service
|
||||
repo: bytetrade.io/web3os/app-service
|
||||
resources:
|
||||
- api:
|
||||
crdVersion: v1
|
||||
namespaced: true
|
||||
controller: true
|
||||
domain: bytetrade.io
|
||||
group: app
|
||||
kind: Application
|
||||
path: bytetrade.io/web3os/app-service/api/v1alpha1
|
||||
version: v1alpha1
|
||||
version: "3"
|
||||
|
|
@ -1,6 +1,74 @@
|
|||
# `app-service`
|
||||
# app-service
|
||||
[](https://github.com/beclab/app-service/actions/workflows/build_main.yaml)
|
||||
|
||||
## Overview
|
||||
## Description
|
||||
**app-service** is a component of **Terminus OS**, principally tasked with managing the installation, upgrade, and uninstallation of Application, Model, and Recommend among other related operations.
|
||||
Additionally, **app-service** offers a seamless interface for the convenient management of Application and Recommend.
|
||||
|
||||
The `app-service` component is a core part of the Olares framework, responsible for handling the lifecycle of applications. This includes managing their installation, updates, and removal, as well as overseeing resource allocation to ensure that all applications run smoothly and efficiently within the Olares ecosystem.
|
||||
The **Application** is a Custom Resource (CR) defined by k8s Custom Resource Definition (CRD). When a user initiates an installation, it is automatically created by the Application Controller.
|
||||
|
||||
## Getting Started
|
||||
You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster.
|
||||
**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).
|
||||
|
||||
### Running on the cluster
|
||||
1. Build and push your image to the location specified by `IMG`:
|
||||
|
||||
```sh
|
||||
make docker-build docker-push IMG=<some-registry>/app-service:tag
|
||||
```
|
||||
|
||||
2. Deploy the controller to the cluster with the image specified by `IMG`:
|
||||
|
||||
```sh
|
||||
make deploy IMG=<some-registry>/app-service:tag
|
||||
```
|
||||
|
||||
### Uninstall CRDs
|
||||
To delete the CRDs from the cluster:
|
||||
|
||||
```sh
|
||||
make uninstall
|
||||
```
|
||||
|
||||
### Undeploy controller
|
||||
UnDeploy the controller to the cluster:
|
||||
|
||||
```sh
|
||||
make undeploy
|
||||
```
|
||||
|
||||
## Contributing
|
||||
// TODO(user): Add detailed information on how you would like others to contribute to this project
|
||||
|
||||
### How it works
|
||||
This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
|
||||
|
||||
It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/)
|
||||
which provides a reconcile function responsible for synchronizing resources untile the desired state is reached on the cluster
|
||||
|
||||
### Test It Out
|
||||
1. Install the CRDs into the cluster:
|
||||
|
||||
```sh
|
||||
make install
|
||||
```
|
||||
|
||||
2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):
|
||||
|
||||
```sh
|
||||
make run
|
||||
```
|
||||
|
||||
**NOTE:** You can also run this in one step by running: `make install run`
|
||||
|
||||
### Modifying the API definitions
|
||||
If you are editing the API definitions, generate the manifests such as CRs or CRDs using:
|
||||
|
||||
```sh
|
||||
make manifests
|
||||
```
|
||||
|
||||
**NOTE:** Run `make --help` for more information on all potential `make` targets
|
||||
|
||||
More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,36 @@
|
|||
package v1alpha1
|
||||
|
||||
// ApplicationState is the state of an application at current time
|
||||
type ApplicationState string
|
||||
|
||||
// These ar the valid states of applications
|
||||
const (
|
||||
// AppRunning means that the application is installed success and ready for serve.
|
||||
AppRunning ApplicationState = "running"
|
||||
// AppStopped means that the application's deployment/statefulset replicas has been set to zero.
|
||||
AppStopped ApplicationState = "stopped"
|
||||
// AppNotReady means that the application's not ready to serve
|
||||
AppNotReady ApplicationState = "notReady"
|
||||
)
|
||||
|
||||
func (a ApplicationState) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
/* ApplicationState change
|
||||
+---------+ install +-------------+ +------------+ +--------------+ +--------------+ suspend +---------+ resume +----------+
|
||||
| pending | ---------> | downloading | --> | installing | --> | initializing | ---------> | | ---------> | suspend | --------> | resuming |
|
||||
+---------+ +-------------+ +------------+ +--------------+ | | +---------+ +----------+
|
||||
| | |
|
||||
+-----------------------> | running | <----------------------------------+
|
||||
| | |
|
||||
+--------------+ upgrade | |
|
||||
| upgrading | <--------- | |
|
||||
+--------------+ +--------------+
|
||||
|
|
||||
| install
|
||||
v
|
||||
+--------------+
|
||||
| uninstalling |
|
||||
+--------------+
|
||||
*/
|
||||
|
|
@ -0,0 +1,184 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
|
||||
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
|
||||
|
||||
// ApplicationSpec defines the desired state of Application
|
||||
type ApplicationSpec struct {
|
||||
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
|
||||
// Important: Run "make" to regenerate code after modifying this file
|
||||
|
||||
// the entrance of the application
|
||||
Index string `json:"index,omitempty"`
|
||||
|
||||
// description from app's description or frontend
|
||||
Description string `json:"description,omitempty"`
|
||||
|
||||
// The url of the icon
|
||||
Icon string `json:"icon,omitempty"`
|
||||
|
||||
// the name of the application
|
||||
Name string `json:"name"`
|
||||
|
||||
// RawAppName the name of application for cloned app, if RawAppName is not empty means this app is cloned
|
||||
RawAppName string `json:"rawAppName,omitempty"`
|
||||
|
||||
// the unique id of the application
|
||||
// for sys application appid equal name otherwise appid equal md5(name)[:8]
|
||||
Appid string `json:"appid"`
|
||||
|
||||
IsSysApp bool `json:"isSysApp"`
|
||||
|
||||
// the namespace of the application
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
|
||||
// the deployment of the application
|
||||
DeploymentName string `json:"deployment,omitempty"`
|
||||
|
||||
// the owner of the application
|
||||
Owner string `json:"owner,omitempty"`
|
||||
|
||||
// Entrances []Entrance `json:"entrances,omitempty"`
|
||||
Entrances []Entrance `json:"entrances,omitempty"`
|
||||
|
||||
// SharedEntrances contains entrances shared with other applications
|
||||
SharedEntrances []Entrance `json:"sharedEntrances,omitempty"`
|
||||
|
||||
Ports []ServicePort `json:"ports,omitempty"`
|
||||
TailScale TailScale `json:"tailscale,omitempty"`
|
||||
TailScaleACLs []ACL `json:"tailscaleAcls,omitempty"`
|
||||
|
||||
// the extend settings of the application
|
||||
Settings map[string]string `json:"settings,omitempty"`
|
||||
}
|
||||
|
||||
type ACL struct {
|
||||
Action string `json:"action,omitempty"`
|
||||
Src []string `json:"src,omitempty"`
|
||||
Proto string `json:"proto"`
|
||||
Dst []string `json:"dst"`
|
||||
}
|
||||
|
||||
type TailScale struct {
|
||||
ACLs []ACL `json:"acls,omitempty"`
|
||||
SubRoutes []string `json:"subRoutes,omitempty"`
|
||||
}
|
||||
|
||||
type EntranceState string
|
||||
|
||||
const (
|
||||
EntranceRunning EntranceState = "running"
|
||||
EntranceNotReady EntranceState = "notReady"
|
||||
EntranceStopped EntranceState = "stopped"
|
||||
)
|
||||
|
||||
func (e EntranceState) String() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// Entrance contains details for application entrance
|
||||
type Entrance struct {
|
||||
Name string `yaml:"name" json:"name"`
|
||||
Host string `yaml:"host" json:"host"`
|
||||
Port int32 `yaml:"port" json:"port"`
|
||||
// Optional. if invisible=true.
|
||||
Icon string `yaml:"icon,omitempty" json:"icon,omitempty"`
|
||||
// Optional. if invisible=true.
|
||||
Title string `yaml:"title" json:"title,omitempty"`
|
||||
AuthLevel string `yaml:"authLevel,omitempty" json:"authLevel,omitempty"`
|
||||
Invisible bool `yaml:"invisible,omitempty" json:"invisible,omitempty"`
|
||||
URL string `yaml:"url,omitempty" json:"url,omitempty"`
|
||||
|
||||
// openMethod has three choices default, iframe, window
|
||||
// Optional. if invisible=true.
|
||||
OpenMethod string `yaml:"openMethod,omitempty" json:"openMethod,omitempty"`
|
||||
|
||||
WindowPushState bool `yaml:"windowPushState,omitempty" json:"windowPushState,omitempty"`
|
||||
Skip bool `yaml:"skip,omitempty" json:"skip,omitempty"`
|
||||
}
|
||||
|
||||
type ServicePort struct {
|
||||
Name string `json:"name" yaml:"name"`
|
||||
Host string `yaml:"host" json:"host"`
|
||||
Port int32 `yaml:"port" json:"port"`
|
||||
|
||||
ExposePort int32 `yaml:"exposePort" json:"exposePort,omitempty"`
|
||||
|
||||
// The protocol for this entrance. Supports "tcp" and "udp","".
|
||||
// Default is tcp/udp, "" mean tcp and udp.
|
||||
// +default="tcp/udp"
|
||||
// +optional
|
||||
Protocol string `yaml:"protocol" json:"protocol,omitempty"`
|
||||
AddToTailscaleAcl bool `yaml:"addToTailscaleAcl" json:"addToTailscaleAcl,omitempty"`
|
||||
}
|
||||
|
||||
// ApplicationStatus defines the observed state of Application
|
||||
type ApplicationStatus struct {
|
||||
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
||||
// Important: Run "make" to regenerate code after modifying this file
|
||||
|
||||
// the state of the application: draft, submitted, passed, rejected, suspended, active
|
||||
State string `json:"state,omitempty"`
|
||||
// for downloading phase
|
||||
Progress string `json:"progress,omitempty"`
|
||||
UpdateTime *metav1.Time `json:"updateTime"`
|
||||
StatusTime *metav1.Time `json:"statusTime"`
|
||||
// StartedTime is the time that app first to running state
|
||||
StartedTime *metav1.Time `json:"startedTime,omitempty"`
|
||||
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
|
||||
EntranceStatuses []EntranceStatus `json:"entranceStatuses,omitempty"`
|
||||
}
|
||||
|
||||
type EntranceStatus struct {
|
||||
Name string `json:"name"`
|
||||
State EntranceState `json:"state"`
|
||||
StatusTime *metav1.Time `json:"statusTime"`
|
||||
Reason string `json:"reason"`
|
||||
Message string `json:"message,omitempty"`
|
||||
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
|
||||
}
|
||||
|
||||
//+genclient
|
||||
//+genclient:nonNamespaced
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:resource:scope=Cluster, shortName={app}, categories={all}
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.name, name=application name, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.namespace, name=namespace, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.status.state, name=state, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Application is the Schema for the applications API
|
||||
type Application struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ApplicationSpec `json:"spec,omitempty"`
|
||||
Status ApplicationStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ApplicationList contains a list of Application
|
||||
type ApplicationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []Application `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&Application{}, &ApplicationList{})
|
||||
}
|
||||
|
||||
// AppResourceName return application name
|
||||
func AppResourceName(name, namespace string) string {
|
||||
return fmt.Sprintf("%s-%s", namespace, name)
|
||||
}
|
||||
|
|
@ -0,0 +1,162 @@
|
|||
package v1alpha1
|
||||
|
||||
// ApplicationManagerState is the state of an applicationmanager at current time
|
||||
type ApplicationManagerState string
|
||||
|
||||
// Describe the states of an applicationmanager
|
||||
const (
|
||||
// Pending means that the operation is waiting to be processed.
|
||||
Pending ApplicationManagerState = "pending"
|
||||
|
||||
Downloading ApplicationManagerState = "downloading"
|
||||
|
||||
// Installing means that the installation operation is underway.
|
||||
Installing ApplicationManagerState = "installing"
|
||||
|
||||
Initializing ApplicationManagerState = "initializing"
|
||||
|
||||
Running ApplicationManagerState = "running"
|
||||
|
||||
// Upgrading means that the upgrade operation is underway.
|
||||
Upgrading ApplicationManagerState = "upgrading"
|
||||
|
||||
ApplyingEnv ApplicationManagerState = "applyingEnv"
|
||||
|
||||
Stopping ApplicationManagerState = "stopping"
|
||||
|
||||
Stopped ApplicationManagerState = "stopped"
|
||||
|
||||
// Resuming means that the resume operation is underway.
|
||||
Resuming ApplicationManagerState = "resuming"
|
||||
|
||||
// Uninstalling means that the uninstallation operation is underway.
|
||||
Uninstalling ApplicationManagerState = "uninstalling"
|
||||
|
||||
UninstallFailed ApplicationManagerState = "uninstallFailed"
|
||||
|
||||
ResumeFailed ApplicationManagerState = "resumeFailed"
|
||||
|
||||
UpgradeFailed ApplicationManagerState = "upgradeFailed"
|
||||
|
||||
ApplyEnvFailed ApplicationManagerState = "applyEnvFailed"
|
||||
|
||||
StopFailed ApplicationManagerState = "stopFailed"
|
||||
|
||||
DownloadFailed ApplicationManagerState = "downloadFailed"
|
||||
|
||||
InstallFailed ApplicationManagerState = "installFailed"
|
||||
|
||||
//InitialFailed ApplicationManagerState = "initialFailed"
|
||||
|
||||
Uninstalled ApplicationManagerState = "uninstalled"
|
||||
|
||||
// PendingCanceled means that the installation operation has been canceled.
|
||||
PendingCanceled ApplicationManagerState = "pendingCanceled"
|
||||
DownloadingCanceled ApplicationManagerState = "downloadingCanceled"
|
||||
InstallingCanceled ApplicationManagerState = "installingCanceled"
|
||||
InitializingCanceled ApplicationManagerState = "initializingCanceled"
|
||||
UpgradingCanceled ApplicationManagerState = "upgradingCanceled"
|
||||
ApplyingEnvCanceled ApplicationManagerState = "applyingEnvCanceled"
|
||||
ResumingCanceled ApplicationManagerState = "resumingCanceled"
|
||||
|
||||
// PendingCanceling means that the installation operation is under canceling operation.
|
||||
PendingCanceling ApplicationManagerState = "pendingCanceling"
|
||||
DownloadingCanceling ApplicationManagerState = "downloadingCanceling"
|
||||
InstallingCanceling ApplicationManagerState = "installingCanceling"
|
||||
InitializingCanceling ApplicationManagerState = "initializingCanceling"
|
||||
UpgradingCanceling ApplicationManagerState = "upgradingCanceling"
|
||||
ApplyingEnvCanceling ApplicationManagerState = "applyingEnvCanceling"
|
||||
ResumingCanceling ApplicationManagerState = "resumingCanceling"
|
||||
//SuspendingCanceling ApplicationManagerState = "suspendingCanceling"
|
||||
|
||||
PendingCancelFailed ApplicationManagerState = "pendingCancelFailed"
|
||||
DownloadingCancelFailed ApplicationManagerState = "downloadingCancelFailed"
|
||||
InstallingCancelFailed ApplicationManagerState = "installingCancelFailed"
|
||||
//InitializingCancelFailed ApplicationManagerState = "initializingCancelFailed"
|
||||
UpgradingCancelFailed ApplicationManagerState = "upgradingCancelFailed"
|
||||
ApplyingEnvCancelFailed ApplicationManagerState = "applyingEnvCancelFailed"
|
||||
ResumingCancelFailed ApplicationManagerState = "resumingCancelFailed"
|
||||
|
||||
//SuspendingCancelFailed ApplicationManagerState = "suspendingCancelFailed"
|
||||
|
||||
Failed ApplicationManagerState = "failed"
|
||||
// Canceled ApplicationManagerState = "canceled"
|
||||
)
|
||||
|
||||
func (a ApplicationManagerState) String() string {
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// OpType represents the type of operation being performed.
|
||||
type OpType string
|
||||
|
||||
// Describe the supported operation types.
|
||||
const (
|
||||
// InstallOp means an installation operation.
|
||||
InstallOp OpType = "install"
|
||||
// UninstallOp means an uninstallation operation.
|
||||
UninstallOp OpType = "uninstall"
|
||||
// UpgradeOp means an upgrade operation.
|
||||
UpgradeOp OpType = "upgrade"
|
||||
// StopOp means a suspend operation.
|
||||
StopOp OpType = "stop"
|
||||
// ResumeOp means a resume operation.
|
||||
ResumeOp OpType = "resume"
|
||||
// CancelOp means a cancel operation that operation can cancel an operation at pending or installing.
|
||||
CancelOp OpType = "cancel"
|
||||
// ApplyEnvOp means applying environment variables
|
||||
ApplyEnvOp OpType = "applyEnv"
|
||||
)
|
||||
|
||||
// Type means the entity that system support.
|
||||
type Type string
|
||||
|
||||
const (
|
||||
// App means application(crd).
|
||||
App Type = "app"
|
||||
// Recommend means argo cronworkflows.
|
||||
Recommend Type = "recommend"
|
||||
|
||||
// Middleware means middleware like mongodb
|
||||
Middleware Type = "middleware"
|
||||
)
|
||||
|
||||
func (t Type) String() string {
|
||||
return string(t)
|
||||
}
|
||||
|
||||
// ApplicationManagerState change for app
|
||||
/*
|
||||
+------------+ +--------------+
|
||||
+----------------- | canceling | --> | canceled |
|
||||
| +------------+ +--------------+
|
||||
| ^ ^
|
||||
| +---------------+------------------+
|
||||
| | |
|
||||
+-----------+ | +---------+ +------------+ +-----------------------------+ +----------+
|
||||
| upgrading | ------+ | | pending | --> | installing | --> | | -------> | resuming |
|
||||
+-----------+ | | +---------+ +------------+ | | +----------+
|
||||
| | | ^ | | | |
|
||||
| | | +---------------+--------------- | completed | <---------------------+
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
| | | +----+ +> | | -+
|
||||
| | | | | +-----------------------------+ |
|
||||
| | | | | ^ |
|
||||
| | +---------------+------------------+----+--------------------+ |
|
||||
| | | | | v |
|
||||
| | | | +--------------+ +--------+ |
|
||||
| +--------------------+------------------+ | uninstalling | --> | | |
|
||||
| | +--------------+ | | |
|
||||
| | ^ | | |
|
||||
| +------------------+ | | failed | |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
+---------------------------------------------------------+----+----------------> | | |
|
||||
| | +--------+ |
|
||||
| | ^ |
|
||||
| +--------------------+---------+
|
||||
| |
|
||||
| |
|
||||
+-------------------------+
|
||||
*/
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
|
||||
//+genclient:nonNamespaced
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:resource:scope=Cluster, shortName={appmgr}, categories={all}
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.appName, name=application name, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.appNamespace, name=namespace, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.status.state, name=state, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ApplicationManager is the Schema for the application managers API
|
||||
type ApplicationManager struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ApplicationManagerSpec `json:"spec,omitempty"`
|
||||
Status ApplicationManagerStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ApplicationManagerStatus defines the observed state of ApplicationManager
|
||||
type ApplicationManagerStatus struct {
|
||||
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
||||
// Important: Run "make" to regenerate code after modifying this file
|
||||
|
||||
OpType OpType `json:"opType,omitempty"`
|
||||
OpGeneration int64 `json:"opGeneration,omitempty"`
|
||||
OpID string `json:"opId,omitempty"`
|
||||
State ApplicationManagerState `json:"state,omitempty"`
|
||||
OpRecords []OpRecord `json:"opRecords,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Payload map[string]string `json:"payload,omitempty"`
|
||||
Progress string `json:"progress,omitempty"`
|
||||
UpdateTime *metav1.Time `json:"updateTime,omitempty"`
|
||||
StatusTime *metav1.Time `json:"statusTime,omitempty"`
|
||||
Completed bool `json:"completed,omitempty"`
|
||||
OpTime *metav1.Time `json:"opTime,omitempty"`
|
||||
LastState ApplicationManagerState `json:"lastState,omitempty"`
|
||||
}
|
||||
|
||||
// ApplicationManagerSpec defines the desired state of ApplicationManager
|
||||
type ApplicationManagerSpec struct {
|
||||
AppName string `json:"appName"`
|
||||
RawAppName string `json:"rawAppName,omitempty"`
|
||||
AppNamespace string `json:"appNamespace,omitempty"`
|
||||
AppOwner string `json:"appOwner,omitempty"`
|
||||
Config string `json:"config,omitempty"`
|
||||
Source string `json:"source"`
|
||||
Type Type `json:"type"`
|
||||
OpType OpType `json:"opType"`
|
||||
}
|
||||
|
||||
// OpRecord contains details of an operation.
|
||||
type OpRecord struct {
|
||||
OpType OpType `json:"opType"`
|
||||
OpID string `json:"opId,omitempty"`
|
||||
Message string `json:"message"`
|
||||
Version string `json:"version"`
|
||||
Source string `json:"source"`
|
||||
Status ApplicationManagerState `json:"status"`
|
||||
StateTime *metav1.Time `json:"statusTime"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ApplicationManagerList contains a list of ApplicationManager
|
||||
type ApplicationManagerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []ApplicationManager `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&ApplicationManager{}, &ApplicationManagerList{})
|
||||
}
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
// +groupName=app.bytetrade.io
|
||||
package v1alpha1
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
// Package v1alpha1 contains API Schema definitions for the app v1alpha1 API group
|
||||
// +kubebuilder:object:generate=true
|
||||
// +groupName=app.bytetrade.io
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/controller-runtime/pkg/scheme"
|
||||
)
|
||||
|
||||
var (
|
||||
// GroupVersion is group version used to register these objects
|
||||
GroupVersion = schema.GroupVersion{Group: "app.bytetrade.io", Version: "v1alpha1"}
|
||||
|
||||
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
|
||||
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
|
||||
|
||||
// AddToScheme adds the types in this group-version to the given scheme.
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
144
framework/app-service/api/app.bytetrade.io/v1alpha1/helper.go
Normal file
144
framework/app-service/api/app.bytetrade.io/v1alpha1/helper.go
Normal file
|
|
@ -0,0 +1,144 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/kubesphere"
|
||||
"bytetrade.io/web3os/app-service/pkg/users/userspace"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
type DefaultThirdLevelDomainConfig struct {
|
||||
AppName string `json:"appName"`
|
||||
EntranceName string `json:"entranceName"`
|
||||
ThirdLevelDomain string `json:"thirdLevelDomain"`
|
||||
}
|
||||
|
||||
func (a *Application) IsClusterScoped() bool {
|
||||
if a.Spec.Settings == nil {
|
||||
return false
|
||||
}
|
||||
if v, ok := a.Spec.Settings["clusterScoped"]; ok && v == "true" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *ApplicationManager) GetAppConfig(appConfig any) (err error) {
|
||||
err = json.Unmarshal([]byte(a.Spec.Config), appConfig)
|
||||
if err != nil {
|
||||
klog.Errorf("unmarshal to appConfig failed %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (a *ApplicationManager) SetAppConfig(appConfig any) error {
|
||||
configBytes, err := json.Marshal(appConfig)
|
||||
if err != nil {
|
||||
klog.Errorf("marshal appConfig failed %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
a.Spec.Config = string(configBytes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ApplicationManager) GetMarketSource() string {
|
||||
return a.Annotations[constants.AppMarketSourceKey]
|
||||
}
|
||||
|
||||
type AppName string
|
||||
|
||||
func (s AppName) GetAppID() string {
|
||||
if s.IsSysApp() {
|
||||
return string(s)
|
||||
}
|
||||
hash := md5.Sum([]byte(s))
|
||||
hashString := hex.EncodeToString(hash[:])
|
||||
return hashString[:8]
|
||||
}
|
||||
|
||||
func (s AppName) String() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
func (s AppName) IsSysApp() bool {
|
||||
return userspace.IsSysApp(string(s))
|
||||
}
|
||||
|
||||
func (s AppName) IsGeneratedApp() bool {
|
||||
return userspace.IsGeneratedApp(string(s))
|
||||
}
|
||||
|
||||
func (s AppName) SharedEntranceIdPrefix() string {
|
||||
hash := md5.Sum([]byte(s.GetAppID() + "shared"))
|
||||
hashString := hex.EncodeToString(hash[:])
|
||||
return hashString[:8]
|
||||
}
|
||||
|
||||
func (app *Application) GenEntranceURL(ctx context.Context) ([]Entrance, error) {
|
||||
zone, err := kubesphere.GetUserZone(ctx, app.Spec.Owner)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get user zone: %v", err)
|
||||
}
|
||||
|
||||
if len(zone) > 0 {
|
||||
var appDomainConfigs []DefaultThirdLevelDomainConfig
|
||||
if defaultThirdLevelDomainConfig, ok := app.Spec.Settings["defaultThirdLevelDomainConfig"]; ok && len(defaultThirdLevelDomainConfig) > 0 {
|
||||
err := json.Unmarshal([]byte(app.Spec.Settings["defaultThirdLevelDomainConfig"]), &appDomainConfigs)
|
||||
if err != nil {
|
||||
klog.Errorf("unmarshal defaultThirdLevelDomainConfig error %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
appid := AppName(app.Spec.Name).GetAppID()
|
||||
if len(app.Spec.Entrances) == 1 {
|
||||
app.Spec.Entrances[0].URL = fmt.Sprintf("%s.%s", appid, zone)
|
||||
} else {
|
||||
for i := range app.Spec.Entrances {
|
||||
app.Spec.Entrances[i].URL = fmt.Sprintf("%s%d.%s", appid, i, zone)
|
||||
for _, adc := range appDomainConfigs {
|
||||
if adc.AppName == app.Spec.Name && adc.EntranceName == app.Spec.Entrances[i].Name && len(adc.ThirdLevelDomain) > 0 {
|
||||
app.Spec.Entrances[i].URL = fmt.Sprintf("%s.%s", adc.ThirdLevelDomain, zone)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return app.Spec.Entrances, nil
|
||||
}
|
||||
|
||||
func (app *Application) GenSharedEntranceURL(ctx context.Context) ([]Entrance, error) {
|
||||
zone, err := kubesphere.GetUserZone(ctx, app.Spec.Owner)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get user zone: %v", err)
|
||||
}
|
||||
|
||||
if len(zone) > 0 {
|
||||
tokens := strings.Split(zone, ".")
|
||||
tokens[0] = "shared"
|
||||
sharedZone := strings.Join(tokens, ".")
|
||||
|
||||
appName := AppName(app.Spec.Name)
|
||||
sharedEntranceIdPrefix := appName.SharedEntranceIdPrefix()
|
||||
for i := range app.Spec.SharedEntrances {
|
||||
if app.Spec.SharedEntrances[i].Port > 0 {
|
||||
app.Spec.SharedEntrances[i].URL = fmt.Sprintf("%s%d.%s:%d", sharedEntranceIdPrefix, i, sharedZone, app.Spec.SharedEntrances[i].Port)
|
||||
} else {
|
||||
app.Spec.SharedEntrances[i].URL = fmt.Sprintf("%s%d.%s", sharedEntranceIdPrefix, i, sharedZone)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return app.Spec.SharedEntrances, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
|
||||
//+genclient:nonNamespaced
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:resource:scope=Cluster, shortName={appimage}, categories={all}
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.appName, name=application name, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.status.state, name=state, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// AppImage is the Schema for the image managers API
|
||||
type AppImage struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageSpec `json:"spec,omitempty"`
|
||||
Status ImageStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
type ImageSpec struct {
|
||||
AppName string `json:"appName"`
|
||||
Nodes []string `json:"nodes"`
|
||||
Refs []string `json:"refs"`
|
||||
}
|
||||
|
||||
type ImageStatus struct {
|
||||
// processing, completed, failed
|
||||
|
||||
State string `json:"state"`
|
||||
Images []ImageInfo `json:"images,omitempty"`
|
||||
StatueTime *metav1.Time `json:"statueTime"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Conditions []Condition `json:"conditions,omitempty"`
|
||||
}
|
||||
|
||||
type Condition struct {
|
||||
Node string `json:"node"`
|
||||
Completed bool `json:"completed"`
|
||||
}
|
||||
|
||||
type ImageInfo struct {
|
||||
Node string `json:"node"`
|
||||
Name string `json:"name"`
|
||||
Architecture string `json:"architecture,omitempty"`
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Os string `json:"os,omitempty"`
|
||||
LayersData []ImageLayer `json:"layersData"`
|
||||
}
|
||||
|
||||
type ImageLayer struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
Digest string `json:"digest"`
|
||||
Offset int64 `json:"offset"`
|
||||
Size int64 `json:"size"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// AppImageList contains a list of AppImage
|
||||
type AppImageList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []AppImage `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&AppImage{}, &AppImageList{})
|
||||
}
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
|
||||
//+genclient:nonNamespaced
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:resource:scope=Cluster, shortName={im}, categories={all}
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.appName, name=application name, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.spec.appNamespace, name=namespace, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.status.state, name=state, type=string
|
||||
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ImageManager is the Schema for the image managers API
|
||||
type ImageManager struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageManagerSpec `json:"spec,omitempty"`
|
||||
Status ImageManagerStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ImageManagerStatus defines the observed state of ApplicationManager
|
||||
type ImageManagerStatus struct {
|
||||
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
|
||||
// Important: Run "make" to regenerate code after modifying this file
|
||||
Conditions map[string]map[string]map[string]string `json:"conditions,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
State string `json:"state"`
|
||||
UpdateTime *metav1.Time `json:"updateTime"`
|
||||
StatusTime *metav1.Time `json:"statusTime"`
|
||||
}
|
||||
|
||||
// ImageManagerSpec defines the desired state of ImageManager
|
||||
type ImageManagerSpec struct {
|
||||
AppName string `json:"appName"`
|
||||
AppNamespace string `json:"appNamespace,omitempty"`
|
||||
AppOwner string `json:"appOwner,omitempty"`
|
||||
Refs []Ref `json:"refs"`
|
||||
Nodes []string `json:"nodes"`
|
||||
}
|
||||
|
||||
type Ref struct {
|
||||
Name string `json:"name"`
|
||||
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy"`
|
||||
}
|
||||
|
||||
type ImageProgress struct {
|
||||
NodeName string `json:"nodeName"`
|
||||
ImageRef string `json:"imageRef"`
|
||||
Progress string `json:"progress"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ImageManagerList contains a list of ApplicationManager
|
||||
type ImageManagerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []ImageManager `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&ImageManager{}, &ImageManagerList{})
|
||||
}
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
|
||||
var SchemeGroupVersion = GroupVersion
|
||||
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
|
@ -0,0 +1,745 @@
|
|||
//go:build !ignore_autogenerated
|
||||
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ACL) DeepCopyInto(out *ACL) {
|
||||
*out = *in
|
||||
if in.Src != nil {
|
||||
in, out := &in.Src, &out.Src
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Dst != nil {
|
||||
in, out := &in.Dst, &out.Dst
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACL.
|
||||
func (in *ACL) DeepCopy() *ACL {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ACL)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AppImage) DeepCopyInto(out *AppImage) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImage.
|
||||
func (in *AppImage) DeepCopy() *AppImage {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AppImage)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AppImage) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AppImageList) DeepCopyInto(out *AppImageList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]AppImage, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppImageList.
|
||||
func (in *AppImageList) DeepCopy() *AppImageList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AppImageList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AppImageList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Application) DeepCopyInto(out *Application) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Application.
|
||||
func (in *Application) DeepCopy() *Application {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Application)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Application) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationList) DeepCopyInto(out *ApplicationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Application, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationList.
|
||||
func (in *ApplicationList) DeepCopy() *ApplicationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ApplicationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationManager) DeepCopyInto(out *ApplicationManager) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationManager.
|
||||
func (in *ApplicationManager) DeepCopy() *ApplicationManager {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationManager)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ApplicationManager) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationManagerList) DeepCopyInto(out *ApplicationManagerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ApplicationManager, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationManagerList.
|
||||
func (in *ApplicationManagerList) DeepCopy() *ApplicationManagerList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationManagerList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ApplicationManagerList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationManagerSpec) DeepCopyInto(out *ApplicationManagerSpec) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationManagerSpec.
|
||||
func (in *ApplicationManagerSpec) DeepCopy() *ApplicationManagerSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationManagerSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationManagerStatus) DeepCopyInto(out *ApplicationManagerStatus) {
|
||||
*out = *in
|
||||
if in.OpRecords != nil {
|
||||
in, out := &in.OpRecords, &out.OpRecords
|
||||
*out = make([]OpRecord, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Payload != nil {
|
||||
in, out := &in.Payload, &out.Payload
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.UpdateTime != nil {
|
||||
in, out := &in.UpdateTime, &out.UpdateTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.StatusTime != nil {
|
||||
in, out := &in.StatusTime, &out.StatusTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.OpTime != nil {
|
||||
in, out := &in.OpTime, &out.OpTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationManagerStatus.
|
||||
func (in *ApplicationManagerStatus) DeepCopy() *ApplicationManagerStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationManagerStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) {
|
||||
*out = *in
|
||||
if in.Entrances != nil {
|
||||
in, out := &in.Entrances, &out.Entrances
|
||||
*out = make([]Entrance, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.SharedEntrances != nil {
|
||||
in, out := &in.SharedEntrances, &out.SharedEntrances
|
||||
*out = make([]Entrance, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]ServicePort, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.TailScale.DeepCopyInto(&out.TailScale)
|
||||
if in.TailScaleACLs != nil {
|
||||
in, out := &in.TailScaleACLs, &out.TailScaleACLs
|
||||
*out = make([]ACL, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Settings != nil {
|
||||
in, out := &in.Settings, &out.Settings
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSpec.
|
||||
func (in *ApplicationSpec) DeepCopy() *ApplicationSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) {
|
||||
*out = *in
|
||||
if in.UpdateTime != nil {
|
||||
in, out := &in.UpdateTime, &out.UpdateTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.StatusTime != nil {
|
||||
in, out := &in.StatusTime, &out.StatusTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.StartedTime != nil {
|
||||
in, out := &in.StartedTime, &out.StartedTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.LastTransitionTime != nil {
|
||||
in, out := &in.LastTransitionTime, &out.LastTransitionTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.EntranceStatuses != nil {
|
||||
in, out := &in.EntranceStatuses, &out.EntranceStatuses
|
||||
*out = make([]EntranceStatus, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus.
|
||||
func (in *ApplicationStatus) DeepCopy() *ApplicationStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplicationStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// NOTE(review): the helpers below follow the controller-gen deepcopy pattern and
// appear to be generated code — prefer changing the API types and regenerating
// (`make generate`) over hand-editing them; confirm against the file header.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	// Plain value copy: Condition evidently has no pointer/slice/map fields
	// (elsewhere []Condition is copied with the builtin copy).
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DefaultThirdLevelDomainConfig) DeepCopyInto(out *DefaultThirdLevelDomainConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultThirdLevelDomainConfig.
func (in *DefaultThirdLevelDomainConfig) DeepCopy() *DefaultThirdLevelDomainConfig {
	if in == nil {
		return nil
	}
	out := new(DefaultThirdLevelDomainConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Entrance) DeepCopyInto(out *Entrance) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Entrance.
func (in *Entrance) DeepCopy() *Entrance {
	if in == nil {
		return nil
	}
	out := new(Entrance)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EntranceStatus) DeepCopyInto(out *EntranceStatus) {
	*out = *in
	// Timestamps are pointers; clone them so the copy does not alias the source.
	if in.StatusTime != nil {
		in, out := &in.StatusTime, &out.StatusTime
		*out = (*in).DeepCopy()
	}
	if in.LastTransitionTime != nil {
		in, out := &in.LastTransitionTime, &out.LastTransitionTime
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntranceStatus.
func (in *EntranceStatus) DeepCopy() *EntranceStatus {
	if in == nil {
		return nil
	}
	out := new(EntranceStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageInfo) DeepCopyInto(out *ImageInfo) {
	*out = *in
	if in.LayersData != nil {
		in, out := &in.LayersData, &out.LayersData
		*out = make([]ImageLayer, len(*in))
		// Element-wise deep copy: ImageLayer contains a map (Annotations).
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInfo.
func (in *ImageInfo) DeepCopy() *ImageInfo {
	if in == nil {
		return nil
	}
	out := new(ImageInfo)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageLayer) DeepCopyInto(out *ImageLayer) {
	*out = *in
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer.
func (in *ImageLayer) DeepCopy() *ImageLayer {
	if in == nil {
		return nil
	}
	out := new(ImageLayer)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageManager) DeepCopyInto(out *ImageManager) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManager.
func (in *ImageManager) DeepCopy() *ImageManager {
	if in == nil {
		return nil
	}
	out := new(ImageManager)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageManager) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageManagerList) DeepCopyInto(out *ImageManagerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ImageManager, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManagerList.
func (in *ImageManagerList) DeepCopy() *ImageManagerList {
	if in == nil {
		return nil
	}
	out := new(ImageManagerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageManagerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageManagerSpec) DeepCopyInto(out *ImageManagerSpec) {
	*out = *in
	// Ref has no reference-typed fields (see Ref.DeepCopyInto), so a shallow
	// element copy is sufficient here.
	if in.Refs != nil {
		in, out := &in.Refs, &out.Refs
		*out = make([]Ref, len(*in))
		copy(*out, *in)
	}
	if in.Nodes != nil {
		in, out := &in.Nodes, &out.Nodes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManagerSpec.
func (in *ImageManagerSpec) DeepCopy() *ImageManagerSpec {
	if in == nil {
		return nil
	}
	out := new(ImageManagerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageManagerStatus) DeepCopyInto(out *ImageManagerStatus) {
	*out = *in
	// Conditions is a three-level nested map; each level is cloned, preserving
	// nil inner maps as nil.
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make(map[string]map[string]map[string]string, len(*in))
		for key, val := range *in {
			var outVal map[string]map[string]string
			if val == nil {
				(*out)[key] = nil
			} else {
				inVal := (*in)[key]
				in, out := &inVal, &outVal
				*out = make(map[string]map[string]string, len(*in))
				for key, val := range *in {
					var outVal map[string]string
					if val == nil {
						(*out)[key] = nil
					} else {
						inVal := (*in)[key]
						in, out := &inVal, &outVal
						*out = make(map[string]string, len(*in))
						for key, val := range *in {
							(*out)[key] = val
						}
					}
					(*out)[key] = outVal
				}
			}
			(*out)[key] = outVal
		}
	}
	if in.UpdateTime != nil {
		in, out := &in.UpdateTime, &out.UpdateTime
		*out = (*in).DeepCopy()
	}
	if in.StatusTime != nil {
		in, out := &in.StatusTime, &out.StatusTime
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManagerStatus.
func (in *ImageManagerStatus) DeepCopy() *ImageManagerStatus {
	if in == nil {
		return nil
	}
	out := new(ImageManagerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageProgress) DeepCopyInto(out *ImageProgress) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageProgress.
func (in *ImageProgress) DeepCopy() *ImageProgress {
	if in == nil {
		return nil
	}
	out := new(ImageProgress)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
	*out = *in
	if in.Nodes != nil {
		in, out := &in.Nodes, &out.Nodes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Refs != nil {
		in, out := &in.Refs, &out.Refs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
func (in *ImageSpec) DeepCopy() *ImageSpec {
	if in == nil {
		return nil
	}
	out := new(ImageSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
	*out = *in
	if in.Images != nil {
		in, out := &in.Images, &out.Images
		*out = make([]ImageInfo, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	// NOTE(review): "StatueTime" looks like a typo for "StatusTime" in the
	// ImageStatus API type; fixing it means renaming the field (a breaking
	// API change) and regenerating — confirm with the type's owners.
	if in.StatueTime != nil {
		in, out := &in.StatueTime, &out.StatueTime
		*out = (*in).DeepCopy()
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]Condition, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
func (in *ImageStatus) DeepCopy() *ImageStatus {
	if in == nil {
		return nil
	}
	out := new(ImageStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpRecord) DeepCopyInto(out *OpRecord) {
	*out = *in
	if in.StateTime != nil {
		in, out := &in.StateTime, &out.StateTime
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpRecord.
func (in *OpRecord) DeepCopy() *OpRecord {
	if in == nil {
		return nil
	}
	out := new(OpRecord)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ref) DeepCopyInto(out *Ref) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ref.
func (in *Ref) DeepCopy() *Ref {
	if in == nil {
		return nil
	}
	out := new(Ref)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServicePort) DeepCopyInto(out *ServicePort) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
func (in *ServicePort) DeepCopy() *ServicePort {
	if in == nil {
		return nil
	}
	out := new(ServicePort)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TailScale) DeepCopyInto(out *TailScale) {
	*out = *in
	if in.ACLs != nil {
		in, out := &in.ACLs, &out.ACLs
		*out = make([]ACL, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.SubRoutes != nil {
		in, out := &in.SubRoutes, &out.SubRoutes
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailScale.
func (in *TailScale) DeepCopy() *TailScale {
	if in == nil {
		return nil
	}
	out := new(TailScale)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
//+kubebuilder:object:root=true
//+kubebuilder:resource:scope=Namespaced, shortName={appenv}, categories={all}
//+kubebuilder:printcolumn:JSONPath=.appName, name=app name, type=string
//+kubebuilder:printcolumn:JSONPath=.appOwner, name=owner, type=string
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date

// AppEnv is the Schema for the application environment variables API.
// It groups the environment variables declared for a single application
// instance (identified by AppName/AppOwner).
type AppEnv struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// AppName is the application these env vars belong to (required).
	AppName string `json:"appName" yaml:"appName" validate:"required"`
	// AppOwner is the owner of the application (required).
	AppOwner string `json:"appOwner" yaml:"appOwner" validate:"required"`
	// Envs is the list of environment variables declared for the app.
	Envs []AppEnvVar `json:"envs,omitempty" yaml:"envs,omitempty"`
	// NeedApply presumably marks that pending env changes still need to be
	// applied to the running app — confirm against the controller that
	// consumes this field.
	NeedApply bool `json:"needApply,omitempty" yaml:"needApply,omitempty"`
}

// AppEnvVar is a single app-level environment variable: the common
// EnvVarSpec fields plus app-specific apply/reference behavior.
type AppEnvVar struct {
	EnvVarSpec `json:",inline" yaml:",inline"`
	// ApplyOnChange presumably triggers a re-apply when the value changes —
	// confirm with the consuming controller.
	ApplyOnChange bool `json:"applyOnChange,omitempty" yaml:"applyOnChange,omitempty"`
	// ValueFrom optionally resolves the value from another env object
	// instead of an inline value.
	ValueFrom *ValueFrom `json:"valueFrom,omitempty" yaml:"valueFrom,omitempty"`
}

// ValueFrom defines a reference to an environment variable (UserEnv or SystemEnv)
type ValueFrom struct {
	// EnvName is the name of the referenced env var (required).
	EnvName string `json:"envName" validate:"required"`
	// Status reports the resolution state of the reference.
	Status string `json:"status,omitempty"`
}

// EnvValueOptionItem is one allowed value for an env var, with a
// human-readable title.
type EnvValueOptionItem struct {
	Title string `json:"title" yaml:"title"`
	Value string `json:"value" yaml:"value"`
}

//+kubebuilder:object:root=true
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AppEnvList contains a list of AppEnv
type AppEnvList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AppEnv `json:"items"`
}

// init registers AppEnv and AppEnvList with the package scheme builder.
func init() {
	SchemeBuilder.Register(&AppEnv{}, &AppEnvList{})
}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
package v1alpha1
|
||||
|
||||
// EnvVarSpec defines the common fields for environment variables
// This struct is embedded in SystemEnv, UserEnv, and AppEnvVar
type EnvVarSpec struct {
	// EnvName is the variable's name (required).
	EnvName string `json:"envName" yaml:"envName" validate:"required"`
	// Value is the explicitly set value; when empty, Default applies
	// (see GetEffectiveValue).
	Value string `json:"value,omitempty" yaml:"value,omitempty"`
	// Default is the fallback value used when Value is empty.
	Default string `json:"default,omitempty" yaml:"default,omitempty"`
	// Editable indicates whether users may change the value.
	Editable bool `json:"editable,omitempty" yaml:"editable,omitempty"`
	// Type constrains the value's format; recognized values are
	// "string", "password", "int", "bool", "url", "ip", "domain" and
	// "email" (see validateType). Unrecognized types are not validated.
	Type string `json:"type,omitempty" yaml:"type,omitempty"`
	// Required marks the variable as mandatory.
	Required bool `json:"required,omitempty" yaml:"required,omitempty"`
	// Title is a human-readable display name.
	Title string `json:"title,omitempty" yaml:"title,omitempty"`
	// Description explains the variable to users.
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	// Options defines a finite set of allowed values for this env var
	Options []EnvValueOptionItem `json:"options,omitempty" yaml:"options,omitempty"`
	// +kubebuilder:validation:Pattern=`^https?://`
	// RemoteOptions provides a URL (http/https) returning a JSON-encoded string array of allowed values
	RemoteOptions string `json:"remoteOptions,omitempty" yaml:"remoteOptions,omitempty"`
	// Regex is an optional Go regular expression the value must match
	// (see validateRegex).
	Regex string `json:"regex,omitempty" yaml:"regex,omitempty"`
}
|
||||
|
||||
// GetEffectiveValue returns the effective value of the environment variable.
|
||||
// If Value is not empty, it returns Value; otherwise, it returns Default.
|
||||
func (e *EnvVarSpec) GetEffectiveValue() string {
|
||||
if e.Value != "" {
|
||||
return e.Value
|
||||
}
|
||||
return e.Default
|
||||
}
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
// +groupName=sys.bytetrade.io
|
||||
package v1alpha1
|
||||
|
|
@ -0,0 +1,169 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/mail"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/validation"
)
|
||||
|
||||
func (e *EnvVarSpec) ValidateValue(value string) error {
|
||||
if value == "" {
|
||||
return nil
|
||||
}
|
||||
if err := e.validateType(value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := e.validateOptions(value); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := e.validateRegex(value); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateType checks that value parses as the declared Type. Empty values
// and unrecognized Type strings are accepted without validation; "string"
// and "password" (and an unset Type) accept anything.
func (e *EnvVarSpec) validateType(value string) error {
	if value == "" {
		return nil
	}
	switch e.Type {
	case "", "string", "password":
		return nil
	case "int":
		_, err := strconv.Atoi(value)
		return err
	case "bool":
		_, err := strconv.ParseBool(value)
		return err
	case "url":
		_, err := url.ParseRequestURI(value)
		return err
	case "ip":
		// Accepts both IPv4 and IPv6 textual forms.
		ip := net.ParseIP(value)
		if ip == nil {
			return fmt.Errorf("invalid ip '%s'", value)
		}
		return nil
	case "domain":
		// RFC 1123 subdomain rules (lowercase alphanumerics, '-', '.').
		errs := validation.IsDNS1123Subdomain(value)
		if len(errs) > 0 {
			return fmt.Errorf("invalid domain '%s'", value)
		}
		return nil
	case "email":
		_, err := mail.ParseAddress(value)
		if err != nil {
			return fmt.Errorf("invalid email '%s'", value)
		}
	}
	// Valid email, or an unknown Type: treated as valid.
	return nil
}
|
||||
|
||||
// validateOptions validates the given value against Options and/or RemoteOptions.
|
||||
// Rules:
|
||||
// - If both Options and RemoteOptions are set, value is valid if it is in either set.
|
||||
// - If only Options is set, value must be in Options.
|
||||
// - If only RemoteOptions is set, value must be in the fetched remote list.
|
||||
// - If neither is set, any value is accepted.
|
||||
func (e *EnvVarSpec) validateOptions(value string) error {
|
||||
if value == "" {
|
||||
return nil
|
||||
}
|
||||
hasOptions := len(e.Options) > 0
|
||||
hasRemote := strings.TrimSpace(e.RemoteOptions) != ""
|
||||
|
||||
if !hasOptions && !hasRemote {
|
||||
return nil
|
||||
}
|
||||
|
||||
if hasOptions && hasRemote {
|
||||
if optionsContainValue(e.Options, value) {
|
||||
return nil
|
||||
}
|
||||
allowed, err := fetchRemoteOptions(e.RemoteOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid remoteOptions: %w", err)
|
||||
}
|
||||
if !optionsContainValue(allowed, value) {
|
||||
return fmt.Errorf("value not allowed by options or remoteOptions")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if hasOptions {
|
||||
if !optionsContainValue(e.Options, value) {
|
||||
return fmt.Errorf("value not in options")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
allowed, err := fetchRemoteOptions(e.RemoteOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid remoteOptions: %w", err)
|
||||
}
|
||||
if !optionsContainValue(allowed, value) {
|
||||
return fmt.Errorf("value not in remoteOptions")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func optionsContainValue(options []EnvValueOptionItem, v string) bool {
|
||||
for _, item := range options {
|
||||
if item.Value == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fetchRemoteOptions fetches allowed values from a remote URL.
|
||||
// Response body must be a JSON array of EnvValueOptionItem: [{"title":"A","value":"a"}, ...]
|
||||
func fetchRemoteOptions(endpoint string) ([]EnvValueOptionItem, error) {
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse url failed: %w", err)
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
return nil, fmt.Errorf("unsupported scheme: %s", u.Scheme)
|
||||
}
|
||||
resp, err := http.Get(endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fetch failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
|
||||
}
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read body failed: %w", err)
|
||||
}
|
||||
var items []EnvValueOptionItem
|
||||
if err := json.Unmarshal(body, &items); err != nil {
|
||||
return nil, fmt.Errorf("decode json failed: %w", err)
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
func (e *EnvVarSpec) validateRegex(value string) error {
|
||||
if e.Regex == "" {
|
||||
return nil
|
||||
}
|
||||
re, err := regexp.Compile(e.Regex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid regex: %w", err)
|
||||
}
|
||||
if !re.MatchString(value) {
|
||||
return fmt.Errorf("value '%s' does not match regex '%s'", value, e.Regex)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
// Package v1alpha1 contains API Schema definitions for the sys v1alpha1 API group
|
||||
// +kubebuilder:object:generate=true
|
||||
// +groupName=sys.bytetrade.io
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/controller-runtime/pkg/scheme"
|
||||
)
|
||||
|
||||
var (
	// GroupVersion is group version used to register these objects
	// (sys.bytetrade.io/v1alpha1).
	GroupVersion = schema.GroupVersion{Group: "sys.bytetrade.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects.
// It aliases GroupVersion for compatibility with code generated against the
// legacy k8s naming convention.
var SchemeGroupVersion = GroupVersion

// Resource takes an unqualified resource name and returns it qualified with
// this package's API group (sys.bytetrade.io).
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
//+genclient:nonNamespaced
//+kubebuilder:object:root=true
//+kubebuilder:resource:scope=Cluster, shortName={sysenv}, categories={all}
//+kubebuilder:printcolumn:JSONPath=.envName, name=env name, type=string
//+kubebuilder:printcolumn:JSONPath=.value, name=value, type=string
//+kubebuilder:printcolumn:JSONPath=.editable, name=editable, type=boolean
//+kubebuilder:printcolumn:JSONPath=.required, name=required, type=boolean
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date

// SystemEnv is the Schema for the system environment variables API.
// It is a cluster-scoped resource whose payload is the shared EnvVarSpec,
// inlined at the top level (the printcolumn paths above address its fields
// directly).
type SystemEnv struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	EnvVarSpec `json:",inline"`
}

//+kubebuilder:object:root=true
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SystemEnvList contains a list of SystemEnv
type SystemEnvList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []SystemEnv `json:"items"`
}

// init registers SystemEnv and SystemEnvList with the package scheme builder.
func init() {
	SchemeBuilder.Register(&SystemEnv{}, &SystemEnvList{})
}
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
//+genclient:nonNamespaced
//+kubebuilder:object:root=true
//+kubebuilder:resource:scope=Cluster, shortName={term}, categories={all}
//+kubebuilder:printcolumn:JSONPath=.spec.name, name=version name, type=string
//+kubebuilder:printcolumn:JSONPath=.spec.version, name=version, type=string
//+kubebuilder:printcolumn:JSONPath=.status.state, name=state, type=string
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Terminus is the Schema for the terminuses API
type Terminus struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   TerminusSpec   `json:"spec,omitempty"`
	Status TerminusStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// TerminusList contains a list of Terminus
type TerminusList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Terminus `json:"items"`
}

// TerminusStatus defines the observed state of Terminus
type TerminusStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// the state of the terminus: draft, submitted, passed, rejected, suspended, active
	State string `json:"state"`
	// UpdateTime records when the status was last updated.
	UpdateTime *metav1.Time `json:"updateTime,omitempty"`
	// StatusTime records when the current state was observed.
	StatusTime *metav1.Time `json:"statusTime,omitempty"`
}

// TerminusSpec defines the desired state of Terminus
type TerminusSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// description from terminus
	Description string `json:"description,omitempty"`

	// the version name of the terminus os
	Name string `json:"name"`

	// the DisplayName of the terminus
	DisplayName string `json:"display,omitempty"`

	// the version of the terminus
	Version string `json:"version"`

	// the release server of the terminus
	ReleaseServer ReleaseServer `json:"releaseServer"`

	// the extend settings of the terminus
	Settings map[string]string `json:"settings,omitempty"`
}

// ReleaseServer defines the Terminus new version release server
type ReleaseServer struct {

	// serverType: github or others
	ServerType string `json:"serverType"`

	// github defines github repo where the terminus released
	Github GithubRepository `json:"github,omitempty"`
}

// GithubRepository defines github repo info
type GithubRepository struct {

	// github repository owner
	Owner string `json:"owner"`

	// github repository name
	Repo string `json:"repo"`
}

// init registers Terminus and TerminusList with the package scheme builder.
func init() {
	SchemeBuilder.Register(&Terminus{}, &TerminusList{})
}
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//+genclient
//+kubebuilder:object:root=true
//+kubebuilder:resource:scope=Namespaced, shortName={userenv}, categories={all}
//+kubebuilder:printcolumn:JSONPath=.envName, name=env name, type=string
//+kubebuilder:printcolumn:JSONPath=.value, name=value, type=string
//+kubebuilder:printcolumn:JSONPath=.editable, name=editable, type=boolean
//+kubebuilder:printcolumn:JSONPath=.required, name=required, type=boolean
//+kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp, name=age, type=date

// UserEnv is the Schema for the user environment variables API.
// It is the namespaced counterpart of SystemEnv; the payload is the shared
// EnvVarSpec, inlined at the top level.
type UserEnv struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	EnvVarSpec `json:",inline"`
}

//+kubebuilder:object:root=true
//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// UserEnvList contains a list of UserEnv
type UserEnvList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []UserEnv `json:"items"`
}

// init registers UserEnv and UserEnvList with the package scheme builder.
func init() {
	SchemeBuilder.Register(&UserEnv{}, &UserEnvList{})
}
|
||||
|
|
@ -0,0 +1,396 @@
|
|||
//go:build !ignore_autogenerated
|
||||
|
||||
// Code generated by controller-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AppEnv) DeepCopyInto(out *AppEnv) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
if in.Envs != nil {
|
||||
in, out := &in.Envs, &out.Envs
|
||||
*out = make([]AppEnvVar, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppEnv.
|
||||
func (in *AppEnv) DeepCopy() *AppEnv {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AppEnv)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AppEnv) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AppEnvList) DeepCopyInto(out *AppEnvList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]AppEnv, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppEnvList.
|
||||
func (in *AppEnvList) DeepCopy() *AppEnvList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AppEnvList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AppEnvList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AppEnvVar) DeepCopyInto(out *AppEnvVar) {
|
||||
*out = *in
|
||||
in.EnvVarSpec.DeepCopyInto(&out.EnvVarSpec)
|
||||
if in.ValueFrom != nil {
|
||||
in, out := &in.ValueFrom, &out.ValueFrom
|
||||
*out = new(ValueFrom)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppEnvVar.
|
||||
func (in *AppEnvVar) DeepCopy() *AppEnvVar {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AppEnvVar)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvValueOptionItem) DeepCopyInto(out *EnvValueOptionItem) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvValueOptionItem.
|
||||
func (in *EnvValueOptionItem) DeepCopy() *EnvValueOptionItem {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EnvValueOptionItem)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EnvVarSpec) DeepCopyInto(out *EnvVarSpec) {
|
||||
*out = *in
|
||||
if in.Options != nil {
|
||||
in, out := &in.Options, &out.Options
|
||||
*out = make([]EnvValueOptionItem, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSpec.
|
||||
func (in *EnvVarSpec) DeepCopy() *EnvVarSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EnvVarSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GithubRepository) DeepCopyInto(out *GithubRepository) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubRepository.
func (in *GithubRepository) DeepCopy() *GithubRepository {
	if in == nil {
		return nil
	}
	out := new(GithubRepository)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReleaseServer) DeepCopyInto(out *ReleaseServer) {
	*out = *in
	out.Github = in.Github
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseServer.
func (in *ReleaseServer) DeepCopy() *ReleaseServer {
	if in == nil {
		return nil
	}
	out := new(ReleaseServer)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SystemEnv) DeepCopyInto(out *SystemEnv) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.EnvVarSpec.DeepCopyInto(&out.EnvVarSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemEnv.
func (in *SystemEnv) DeepCopy() *SystemEnv {
	if in == nil {
		return nil
	}
	out := new(SystemEnv)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SystemEnv) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SystemEnvList) DeepCopyInto(out *SystemEnvList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]SystemEnv, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemEnvList.
func (in *SystemEnvList) DeepCopy() *SystemEnvList {
	if in == nil {
		return nil
	}
	out := new(SystemEnvList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SystemEnvList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Terminus) DeepCopyInto(out *Terminus) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Terminus.
func (in *Terminus) DeepCopy() *Terminus {
	if in == nil {
		return nil
	}
	out := new(Terminus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Terminus) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TerminusList) DeepCopyInto(out *TerminusList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Terminus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminusList.
func (in *TerminusList) DeepCopy() *TerminusList {
	if in == nil {
		return nil
	}
	out := new(TerminusList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TerminusList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TerminusSpec) DeepCopyInto(out *TerminusSpec) {
	*out = *in
	out.ReleaseServer = in.ReleaseServer
	if in.Settings != nil {
		in, out := &in.Settings, &out.Settings
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminusSpec.
func (in *TerminusSpec) DeepCopy() *TerminusSpec {
	if in == nil {
		return nil
	}
	out := new(TerminusSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TerminusStatus) DeepCopyInto(out *TerminusStatus) {
	*out = *in
	if in.UpdateTime != nil {
		in, out := &in.UpdateTime, &out.UpdateTime
		*out = (*in).DeepCopy()
	}
	if in.StatusTime != nil {
		in, out := &in.StatusTime, &out.StatusTime
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TerminusStatus.
func (in *TerminusStatus) DeepCopy() *TerminusStatus {
	if in == nil {
		return nil
	}
	out := new(TerminusStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserEnv) DeepCopyInto(out *UserEnv) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.EnvVarSpec.DeepCopyInto(&out.EnvVarSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserEnv.
func (in *UserEnv) DeepCopy() *UserEnv {
	if in == nil {
		return nil
	}
	out := new(UserEnv)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UserEnv) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserEnvList) DeepCopyInto(out *UserEnvList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]UserEnv, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserEnvList.
func (in *UserEnvList) DeepCopy() *UserEnvList {
	if in == nil {
		return nil
	}
	out := new(UserEnvList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *UserEnvList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValueFrom) DeepCopyInto(out *ValueFrom) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom.
func (in *ValueFrom) DeepCopy() *ValueFrom {
	if in == nil {
		return nil
	}
	out := new(ValueFrom)
	in.DeepCopyInto(out)
	return out
}
BIN
framework/app-service/bin/controller-gen
Executable file
BIN
framework/app-service/bin/controller-gen
Executable file
Binary file not shown.
321
framework/app-service/cmd/app-service/main.go
Normal file
321
framework/app-service/cmd/app-service/main.go
Normal file
|
|
@ -0,0 +1,321 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
iamv1alpha2 "github.com/beclab/api/iam/v1alpha2"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/controllers"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver"
|
||||
appevent "bytetrade.io/web3os/app-service/pkg/event"
|
||||
"bytetrade.io/web3os/app-service/pkg/generated/clientset/versioned"
|
||||
"bytetrade.io/web3os/app-service/pkg/images"
|
||||
|
||||
kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
|
||||
kbopv1alphav1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1"
|
||||
"k8s.io/client-go/dynamic"
|
||||
|
||||
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
|
||||
// to ensure that exec-entrypoint and run can make use of them.
|
||||
"go.uber.org/zap/zapcore"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
||||
//"k8s.io/client-go/dynamic"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"k8s.io/client-go/rest"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
)
|
||||
|
||||
var (
	// scheme aggregates every API group the manager's controllers work with.
	scheme = runtime.NewScheme()
	// setupLog is the logger used during process startup/wiring.
	setupLog = ctrl.Log.WithName("setup")
)

// init registers the built-in Kubernetes types plus the app.bytetrade.io,
// sys.bytetrade.io, iam, and KubeBlocks API groups into the shared scheme.
// utilruntime.Must panics on failure, which can only be a programming error.
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(appv1alpha1.AddToScheme(scheme))
	utilruntime.Must(sysv1alpha1.AddToScheme(scheme))
	utilruntime.Must(iamv1alpha2.AddToScheme(scheme))
	utilruntime.Must(kbappsv1.AddToScheme(scheme))
	utilruntime.Must(kbopv1alphav1.AddToScheme(scheme))

	//+kubebuilder:scaffold:scheme
}

// shutdownSignals are the OS signals that trigger a graceful shutdown.
var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

const (
	// Environment variables injected in-cluster that locate the kubesphere apiserver.
	kubeSphereHostAddr = "KS_APISERVER_SERVICE_HOST" // env name in cluster
	kubeSphereHostPort = "KS_APISERVER_SERVICE_PORT"
)
func main() {
|
||||
var metricsAddr string
|
||||
var enableLeaderElection bool
|
||||
var probeAddr string
|
||||
flag.StringVar(&metricsAddr, "metrics-bind-address", ":6080", "The address the metric endpoint binds to.")
|
||||
flag.StringVar(&probeAddr, "health-probe-bind-address", ":6081", "The address the probe endpoint binds to.")
|
||||
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
|
||||
"Enable leader election for controller manager. "+
|
||||
"Enabling this will ensure there is only one active controller manager.")
|
||||
opts := zap.Options{
|
||||
Development: true,
|
||||
TimeEncoder: zapcore.RFC3339TimeEncoder,
|
||||
}
|
||||
opts.BindFlags(flag.CommandLine)
|
||||
flag.Parse()
|
||||
|
||||
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
|
||||
|
||||
config := ctrl.GetConfigOrDie()
|
||||
|
||||
mgr, err := ctrl.NewManager(config, ctrl.Options{
|
||||
Scheme: scheme,
|
||||
HealthProbeBindAddress: probeAddr,
|
||||
LeaderElection: enableLeaderElection,
|
||||
LeaderElectionID: "5117a667.bytetrade.io",
|
||||
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
|
||||
// when the Manager ends. This requires the binary to immediately end when the
|
||||
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
|
||||
// speeds up voluntary leader transitions as the new leader don't have to wait
|
||||
// LeaseDuration time first.
|
||||
//
|
||||
// In the default scaffold provided, the program ends immediately after
|
||||
// the manager stops, so would be fine to enable this option. However,
|
||||
// if you are doing or is intended to do any operation such as perform cleanups
|
||||
// after the manager stops then its usage might be unsafe.
|
||||
// LeaderElectionReleaseOnCancel: true,
|
||||
})
|
||||
if err != nil {
|
||||
setupLog.Error(err, "Unable to start manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// initialize process environment variables from existing SystemEnv CRs
|
||||
initEnvClient, initEnvErr := client.New(config, client.Options{Scheme: scheme})
|
||||
if initEnvErr != nil {
|
||||
setupLog.Error(initEnvErr, "Unable to create uncached client for SystemEnv initialization")
|
||||
os.Exit(1)
|
||||
}
|
||||
initEnvCtx, cancelInitEnvCtx := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancelInitEnvCtx()
|
||||
if initEnvErr := controllers.InitializeSystemEnvProcessEnv(initEnvCtx, initEnvClient); initEnvErr != nil {
|
||||
setupLog.Error(initEnvErr, "Failed to initialize process env from SystemEnv")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
appClient := versioned.NewForConfigOrDie(config)
|
||||
ictx, cancelFunc := context.WithCancel(context.Background())
|
||||
|
||||
if err = (&controllers.ApplicationReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
AppClientset: appClient,
|
||||
Kubeconfig: config,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "Application")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.SecurityReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
DynamicClient: dynamic.NewForConfigOrDie(config),
|
||||
}).SetupWithManager(ictx, mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "Security")
|
||||
os.Exit(1)
|
||||
}
|
||||
appEventQueue := appevent.NewAppEventQueue(ictx)
|
||||
appevent.SetAppEventQueue(appEventQueue)
|
||||
go appEventQueue.Run()
|
||||
|
||||
if err = (&controllers.ApplicationManagerController{
|
||||
Client: mgr.GetClient(),
|
||||
KubeConfig: config,
|
||||
ImageClient: images.NewImageManager(mgr.GetClient()),
|
||||
//Manager: make(map[string]context.CancelFunc),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "Application Manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.EntranceStatusManagerController{
|
||||
Client: mgr.GetClient(),
|
||||
}).SetUpWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "EntranceStatus Manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.EvictionManagerController{
|
||||
Client: mgr.GetClient(),
|
||||
}).SetUpWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "Eviction Manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.PodAbnormalSuspendAppController{
|
||||
Client: mgr.GetClient(),
|
||||
}).SetUpWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "PodAbnormalSuspendApp")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.TailScaleACLController{
|
||||
Client: mgr.GetClient(),
|
||||
}).SetUpWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "tailScaleACLA manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.UserController{
|
||||
Client: mgr.GetClient(),
|
||||
KubeConfig: config,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "User")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.NamespaceReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "namespace")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.NodeAlertController{
|
||||
Client: mgr.GetClient(),
|
||||
KubeConfig: config,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "NodeAlert")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.SystemEnvController{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "SystemEnv")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.SystemEnvProcessEnvController{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "SystemEnvProcessEnv")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.UserEnvController{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "UserEnv")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.UserEnvSyncController{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "UserEnvSync")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&controllers.AppEnvController{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "Unable to create controller", "controller", "AppEnv")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
//+kubebuilder:scaffold:builder
|
||||
|
||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||
setupLog.Error(err, "Unable to set up health check")
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
||||
setupLog.Error(err, "Unable to set up ready check")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// sync the api server and the manager with context
|
||||
errCh := make(chan error) // api server error
|
||||
defer close(errCh)
|
||||
|
||||
c := make(chan os.Signal, 2)
|
||||
signal.Notify(c, shutdownSignals...)
|
||||
go func() {
|
||||
select {
|
||||
case <-c:
|
||||
cancelFunc()
|
||||
<-c
|
||||
os.Exit(1) // second signal. Exit directly.
|
||||
case err := <-errCh:
|
||||
cancelFunc()
|
||||
setupLog.Error(err, "Unable to running api server")
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
// api server run with request's token
|
||||
// get kubesphere host from env or config file
|
||||
ksHost := os.Getenv(kubeSphereHostAddr)
|
||||
ksPort := os.Getenv(kubeSphereHostPort)
|
||||
if ksHost == "" || ksPort == "" {
|
||||
cancelFunc()
|
||||
setupLog.Error(err, "Failed to get the kubesphere api server host from env")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// start api server
|
||||
func(ctx context.Context, errCh chan error, ksHost string, kubeConfig *rest.Config) {
|
||||
go func() {
|
||||
if err := runAPIServer(ctx, ksHost, kubeConfig, mgr.GetClient()); err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
}()
|
||||
}(ictx, errCh, fmt.Sprintf("%s:%s", ksHost, ksPort), config)
|
||||
|
||||
setupLog.Info("Starting manager")
|
||||
if err := mgr.Start(ictx); err != nil {
|
||||
cancelFunc()
|
||||
setupLog.Error(err, "Unable to running manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cancelFunc()
|
||||
}
|
||||
|
||||
func runAPIServer(ctx context.Context, ksHost string, kubeConfig *rest.Config, client client.Client) error {
|
||||
server, err := apiserver.New(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
err = server.PrepareRun(ksHost, kubeConfig, client, stopCh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = server.Run()
|
||||
return err
|
||||
}
|
||||
93
framework/app-service/cmd/image-service/main.go
Normal file
93
framework/app-service/cmd/image-service/main.go
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/controllers"
|
||||
"context"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"os"
|
||||
"os/signal"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
	// scheme aggregates the API groups the image-service controllers work with.
	scheme = runtime.NewScheme()
	// imageLog is the shared logger for the image-service process.
	imageLog = ctrl.Log.WithName("image")
)

// init registers the built-in Kubernetes types plus the app.bytetrade.io and
// sys.bytetrade.io CRD groups into the scheme. utilruntime.Must panics on
// registration failure, which can only be a programming error.
func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(appv1alpha1.AddToScheme(scheme))
	utilruntime.Must(sysv1alpha1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}

// shutdownSignals are the OS signals that trigger a graceful shutdown.
var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
// main runs the standalone image-service controller manager, hosting only the
// image-related controllers (ImageManager, AppImageInfo). Leader election is
// deliberately off, and the health probe binds to :7081 so it does not clash
// with the main app-service process.
func main() {
	opts := zap.Options{
		Development: true,
		TimeEncoder: zapcore.RFC3339TimeEncoder,
	}
	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
	config := ctrl.GetConfigOrDie()
	mgr, err := ctrl.NewManager(config, ctrl.Options{
		Scheme:                 scheme,
		HealthProbeBindAddress: ":7081",
		LeaderElection:         false,
	})
	if err != nil {
		imageLog.Error(err, "Unable to start image manager")
		os.Exit(1)
	}

	// Register both controllers before health checks and Start.
	if err = (&controllers.ImageManagerController{
		Client: mgr.GetClient(),
	}).SetupWithManager(mgr); err != nil {
		imageLog.Error(err, "Unable to create image controller")
		os.Exit(1)
	}
	if err = (&controllers.AppImageInfoController{
		Client: mgr.GetClient(),
	}).SetupWithManager(mgr); err != nil {
		imageLog.Error(err, "Unable to create app image controller")
		os.Exit(1)
	}

	if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		imageLog.Error(err, "Unable to set up health check")
		os.Exit(1)
	}
	if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		imageLog.Error(err, "Unable to set up ready check")
		os.Exit(1)
	}

	// ictx is the root context for the manager; cancelFunc is the shutdown switch.
	ictx, cancelFunc := context.WithCancel(context.Background())

	// First signal: cancel the manager context (graceful stop); a second
	// signal exits immediately.
	c := make(chan os.Signal, 2)
	signal.Notify(c, shutdownSignals...)
	go func() {
		select {
		case <-c:
			cancelFunc()
			<-c
			os.Exit(1) // second signal. Exit directly.

		}
	}()

	// Blocks until ictx is canceled or the manager fails.
	if err = mgr.Start(ictx); err != nil {
		cancelFunc()
		imageLog.Error(err, "Unable to running image manager")
		os.Exit(1)
	}
	cancelFunc()
}
21
framework/app-service/config/crd/kustomization.yaml
Normal file
21
framework/app-service/config/crd/kustomization.yaml
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
# This kustomization.yaml is not intended to be run by itself,
|
||||
# since it depends on service name and namespace that are out of this kustomize package.
|
||||
# It should be run by config/default
|
||||
resources:
|
||||
- bases/app.bytetrade.io_applications.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizeresource
|
||||
|
||||
patchesStrategicMerge:
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
|
||||
# patches here are for enabling the conversion webhook for each CRD
|
||||
#- patches/webhook_in_applications.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizewebhookpatch
|
||||
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
|
||||
# patches here are for enabling the CA injection for each CRD
|
||||
#- patches/cainjection_in_applications.yaml
|
||||
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
|
||||
|
||||
# the following config is for teaching kustomize how to do kustomization for CRDs.
|
||||
configurations:
|
||||
- kustomizeconfig.yaml
|
||||
19
framework/app-service/config/crd/kustomizeconfig.yaml
Normal file
19
framework/app-service/config/crd/kustomizeconfig.yaml
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
|
||||
nameReference:
|
||||
- kind: Service
|
||||
version: v1
|
||||
fieldSpecs:
|
||||
- kind: CustomResourceDefinition
|
||||
version: v1
|
||||
group: apiextensions.k8s.io
|
||||
path: spec/conversion/webhook/clientConfig/service/name
|
||||
|
||||
namespace:
|
||||
- kind: CustomResourceDefinition
|
||||
version: v1
|
||||
group: apiextensions.k8s.io
|
||||
path: spec/conversion/webhook/clientConfig/service/namespace
|
||||
create: false
|
||||
|
||||
varReference:
|
||||
- path: metadata/annotations
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
# The following patch adds a directive for certmanager to inject CA into the CRD
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
||||
name: applications.app.bytetrade.io
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
# The following patch enables a conversion webhook for the CRD
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: applications.app.bytetrade.io
|
||||
spec:
|
||||
conversion:
|
||||
strategy: Webhook
|
||||
webhook:
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: system
|
||||
name: webhook-service
|
||||
path: /convert
|
||||
conversionReviewVersions:
|
||||
- v1
|
||||
74
framework/app-service/config/default/kustomization.yaml
Normal file
74
framework/app-service/config/default/kustomization.yaml
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
# Adds namespace to all resources.
|
||||
namespace: app-service-system
|
||||
|
||||
# Value of this field is prepended to the
|
||||
# names of all resources, e.g. a deployment named
|
||||
# "wordpress" becomes "alices-wordpress".
|
||||
# Note that it should also match with the prefix (text before '-') of the namespace
|
||||
# field above.
|
||||
namePrefix: app-service-
|
||||
|
||||
# Labels to add to all resources and selectors.
|
||||
#commonLabels:
|
||||
# someName: someValue
|
||||
|
||||
bases:
|
||||
- ../crd
|
||||
- ../rbac
|
||||
- ../manager
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
|
||||
# crd/kustomization.yaml
|
||||
#- ../webhook
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
|
||||
#- ../certmanager
|
||||
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
|
||||
#- ../prometheus
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
# If you want your controller-manager to expose the /metrics
|
||||
# endpoint w/o any authn/z, please comment the following line.
|
||||
- manager_auth_proxy_patch.yaml
|
||||
|
||||
# Mount the controller config file for loading manager configurations
|
||||
# through a ComponentConfig type
|
||||
#- manager_config_patch.yaml
|
||||
|
||||
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
|
||||
# crd/kustomization.yaml
|
||||
#- manager_webhook_patch.yaml
|
||||
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
|
||||
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
|
||||
# 'CERTMANAGER' needs to be enabled to use ca injection
|
||||
#- webhookcainjection_patch.yaml
|
||||
|
||||
# the following config is for teaching kustomize how to do var substitution
|
||||
vars:
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
||||
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
|
||||
# objref:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # this name should match the one in certificate.yaml
|
||||
# fieldref:
|
||||
# fieldpath: metadata.namespace
|
||||
#- name: CERTIFICATE_NAME
|
||||
# objref:
|
||||
# kind: Certificate
|
||||
# group: cert-manager.io
|
||||
# version: v1
|
||||
# name: serving-cert # this name should match the one in certificate.yaml
|
||||
#- name: SERVICE_NAMESPACE # namespace of the service
|
||||
# objref:
|
||||
# kind: Service
|
||||
# version: v1
|
||||
# name: webhook-service
|
||||
# fieldref:
|
||||
# fieldpath: metadata.namespace
|
||||
#- name: SERVICE_NAME
|
||||
# objref:
|
||||
# kind: Service
|
||||
# version: v1
|
||||
# name: webhook-service
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
# This patch inject a sidecar container which is a HTTP proxy for the
|
||||
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-rbac-proxy
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- "ALL"
|
||||
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
|
||||
args:
|
||||
- "--secure-listen-address=0.0.0.0:8443"
|
||||
- "--upstream=http://127.0.0.1:8080/"
|
||||
- "--logtostderr=true"
|
||||
- "--v=0"
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
name: https
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 5m
|
||||
memory: 64Mi
|
||||
- name: manager
|
||||
args:
|
||||
- "--health-probe-bind-address=:8081"
|
||||
- "--metrics-bind-address=127.0.0.1:8080"
|
||||
- "--leader-elect"
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: manager
|
||||
args:
|
||||
- "--config=controller_manager_config.yaml"
|
||||
volumeMounts:
|
||||
- name: manager-config
|
||||
mountPath: /controller_manager_config.yaml
|
||||
subPath: controller_manager_config.yaml
|
||||
volumes:
|
||||
- name: manager-config
|
||||
configMap:
|
||||
name: manager-config
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
|
||||
kind: ControllerManagerConfig
|
||||
health:
|
||||
healthProbeBindAddress: :8081
|
||||
metrics:
|
||||
bindAddress: 127.0.0.1:8080
|
||||
webhook:
|
||||
port: 9443
|
||||
leaderElection:
|
||||
leaderElect: true
|
||||
resourceName: 5117a667.bytetrade.io
|
||||
# leaderElectionReleaseOnCancel defines if the leader should voluntarily step down
|
||||
# when the Manager ends. This requires the binary to immediately end when the
|
||||
# Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
|
||||
# speeds up voluntary leader transitions as the new leader doesn't have to wait
|
||||
# LeaseDuration time first.
|
||||
# In the default scaffold provided, the program ends immediately after
|
||||
# the manager stops, so would be fine to enable this option. However,
|
||||
# if you are doing or is intended to do any operation such as perform cleanups
|
||||
# after the manager stops then its usage might be unsafe.
|
||||
# leaderElectionReleaseOnCancel: true
|
||||
10
framework/app-service/config/manager/kustomization.yaml
Normal file
10
framework/app-service/config/manager/kustomization.yaml
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
resources:
|
||||
- manager.yaml
|
||||
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
|
||||
configMapGenerator:
|
||||
- name: manager-config
|
||||
files:
|
||||
- controller_manager_config.yaml
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
resources:
|
||||
- monitor.yaml
|
||||
20
framework/app-service/config/prometheus/monitor.yaml
Normal file
20
framework/app-service/config/prometheus/monitor.yaml
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
|
||||
# Prometheus Monitor Service (Metrics)
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
name: controller-manager-metrics-monitor
|
||||
namespace: system
|
||||
spec:
|
||||
endpoints:
|
||||
- path: /metrics
|
||||
port: https
|
||||
scheme: https
|
||||
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: controller-manager
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
# permissions for end users to edit applications.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: application-editor-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applications
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applications/status
|
||||
verbs:
|
||||
- get
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
# permissions for end users to view applications.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: application-viewer-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applications
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applications/status
|
||||
verbs:
|
||||
- get
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: metrics-reader
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- "/metrics"
|
||||
verbs:
|
||||
- get
|
||||
17
framework/app-service/config/rbac/auth_proxy_role.yaml
Normal file
17
framework/app-service/config/rbac/auth_proxy_role.yaml
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: proxy-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: proxy-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: proxy-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
15
framework/app-service/config/rbac/auth_proxy_service.yaml
Normal file
15
framework/app-service/config/rbac/auth_proxy_service.yaml
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
control-plane: controller-manager
|
||||
name: controller-manager-metrics-service
|
||||
namespace: system
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 8443
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
control-plane: controller-manager
|
||||
18
framework/app-service/config/rbac/kustomization.yaml
Normal file
18
framework/app-service/config/rbac/kustomization.yaml
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
resources:
|
||||
# All RBAC will be applied under this service account in
|
||||
# the deployment namespace. You may comment out this resource
|
||||
# if your manager will use a service account that exists at
|
||||
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
|
||||
# subjects if changing service account names.
|
||||
- service_account.yaml
|
||||
- role.yaml
|
||||
- role_binding.yaml
|
||||
- leader_election_role.yaml
|
||||
- leader_election_role_binding.yaml
|
||||
# Comment the following 4 lines if you want to disable
|
||||
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
|
||||
# which protects your /metrics endpoint.
|
||||
- auth_proxy_service.yaml
|
||||
- auth_proxy_role.yaml
|
||||
- auth_proxy_role_binding.yaml
|
||||
- auth_proxy_client_clusterrole.yaml
|
||||
37
framework/app-service/config/rbac/leader_election_role.yaml
Normal file
37
framework/app-service/config/rbac/leader_election_role.yaml
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
# permissions to do leader election.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: leader-election-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: leader-election-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: leader-election-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
84
framework/app-service/config/rbac/role.yaml
Normal file
84
framework/app-service/config/rbac/role.yaml
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: manager-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applicationmanagers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applicationmanagers/status
|
||||
- applications/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applications
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- app.bytetrade.io
|
||||
resources:
|
||||
- applications/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- iam.kubesphere.io
|
||||
resources:
|
||||
- users
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- sys.bytetrade.io
|
||||
resources:
|
||||
- appenvs
|
||||
- systemenvs
|
||||
- userenvs
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- sys.bytetrade.io
|
||||
resources:
|
||||
- appenvs/status
|
||||
- systemenvs/status
|
||||
- userenvs/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
12
framework/app-service/config/rbac/role_binding.yaml
Normal file
12
framework/app-service/config/rbac/role_binding.yaml
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: manager-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: manager-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
5
framework/app-service/config/rbac/service_account.yaml
Normal file
5
framework/app-service/config/rbac/service_account.yaml
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: controller-manager
|
||||
namespace: system
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: app.bytetrade.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: bfl
|
||||
labels:
|
||||
app.bytetrade.io/name: bfl
|
||||
spec:
|
||||
# TODO(user): Add fields here
|
||||
name: bfl
|
||||
namespace: default
|
||||
owner: admin
|
||||
|
||||
268
framework/app-service/controllers/appenv_controller.go
Normal file
268
framework/app-service/controllers/appenv_controller.go
Normal file
|
|
@ -0,0 +1,268 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
coordinationv1 "k8s.io/api/coordination/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// AppEnvController reconciles AppEnv objects: it keeps an app's environment
// variable values in sync with their SystemEnv/UserEnv sources and, when a
// changed value must be rolled out, triggers an ApplyEnv operation on the
// owning ApplicationManager.
type AppEnvController struct {
	client.Client
	// Scheme is the runtime scheme shared with the controller manager.
	Scheme *runtime.Scheme
}
|
||||
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=appenvs,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=appenvs/status,verbs=get;update;patch
|
||||
//+kubebuilder:groups=app.bytetrade.io,resources=applicationmanagers,verbs=get;list;watch;update;patch
|
||||
//+kubebuilder:rbac:groups=app.bytetrade.io,resources=applicationmanagers/status,verbs=get;update;patch
|
||||
|
||||
func (r *AppEnvController) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&sysv1alpha1.AppEnv{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *AppEnvController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
klog.Infof("Reconciling AppEnv: %s", req.NamespacedName)
|
||||
|
||||
var appEnv sysv1alpha1.AppEnv
|
||||
if err := r.Get(ctx, req.NamespacedName, &appEnv); err != nil {
|
||||
//todo: more detailed logic
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
return r.reconcileAppEnv(ctx, &appEnv)
|
||||
}
|
||||
|
||||
func (r *AppEnvController) reconcileAppEnv(ctx context.Context, appEnv *sysv1alpha1.AppEnv) (ctrl.Result, error) {
|
||||
klog.Infof("Processing AppEnv change: %s/%s", appEnv.Namespace, appEnv.Name)
|
||||
|
||||
// Check if this AppEnv was triggered by an environment variable change
|
||||
if appEnv.Annotations != nil && appEnv.Annotations[constants.AppEnvSyncAnnotation] != "" {
|
||||
klog.Infof("AppEnv %s/%s triggered by environment variable change: %s",
|
||||
appEnv.Namespace, appEnv.Name, appEnv.Annotations[constants.AppEnvSyncAnnotation])
|
||||
|
||||
// Clear the annotation immediately - the update will trigger another reconcile
|
||||
if err := r.clearSyncAnnotation(ctx, appEnv); err != nil {
|
||||
klog.Errorf("Failed to clear sync annotation for AppEnv %s/%s: %v", appEnv.Namespace, appEnv.Name, err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// Return immediately - the annotation update will trigger another reconcile
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// This reconcile is not triggered by annotation, proceed with normal sync
|
||||
if err := r.syncEnvValues(ctx, appEnv); err != nil {
|
||||
klog.Errorf("Failed to sync environment values for AppEnv %s/%s: %v", appEnv.Namespace, appEnv.Name, err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if appEnv.NeedApply {
|
||||
// check for active user batch lease to avoid mid-batch apply
|
||||
userNamespace := utils.UserspaceName(appEnv.AppOwner)
|
||||
lease := &coordinationv1.Lease{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: "env-batch-lock", Namespace: userNamespace}, lease); err == nil {
|
||||
if isLeaseActive(lease) {
|
||||
klog.Infof("User batch lease is active for app: %s owner: %s, requeueing", appEnv.AppName, appEnv.AppOwner)
|
||||
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
|
||||
}
|
||||
}
|
||||
if err := r.triggerApplyEnv(ctx, appEnv); err != nil {
|
||||
klog.Errorf("Failed to trigger ApplyEnv for AppEnv %s/%s: %v", appEnv.Namespace, appEnv.Name, err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if err := r.markEnvApplied(ctx, appEnv); err != nil {
|
||||
klog.Errorf("Failed to mark AppEnv %s/%s as applied: %v", appEnv.Namespace, appEnv.Name, err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// syncEnvValues resolves every env var in appEnv.Envs that declares a
// ValueFrom reference against the cluster-wide SystemEnvs and the owner's
// UserEnvs (UserEnv wins when both define the same name, with a warning).
// When any value/type/ref-status changed, the AppEnv is merge-patched back;
// vars flagged ApplyOnChange also set appEnv.NeedApply for the caller.
func (r *AppEnvController) syncEnvValues(ctx context.Context, appEnv *sysv1alpha1.AppEnv) error {
	// Snapshot for the merge patch at the end.
	original := appEnv.DeepCopy()

	// Get SystemEnv values
	var systemEnvList sysv1alpha1.SystemEnvList
	if err := r.List(ctx, &systemEnvList); err != nil {
		return fmt.Errorf("failed to list SystemEnvs: %v", err)
	}
	systemEnvMap := make(map[string]*sysv1alpha1.SystemEnv)
	for _, sysEnv := range systemEnvList.Items {
		// NOTE(review): taking the address of the range variable is only safe
		// with Go >= 1.22 per-iteration loop variables — confirm the module's
		// go directive; on older toolchains every entry would alias the last item.
		systemEnvMap[sysEnv.EnvName] = &sysEnv
	}

	// Get UserEnv values from user-space-{appOwner} namespace
	var userEnvList sysv1alpha1.UserEnvList
	userNamespace := utils.UserspaceName(appEnv.AppOwner)
	if err := r.List(ctx, &userEnvList, client.InNamespace(userNamespace)); err != nil {
		return fmt.Errorf("failed to list UserEnvs in namespace %s: %v", userNamespace, err)
	}
	userEnvMap := make(map[string]*sysv1alpha1.UserEnv)
	for _, userEnv := range userEnvList.Items {
		// Same Go >= 1.22 caveat as above.
		userEnvMap[userEnv.EnvName] = &userEnv
	}

	updated := false
	for i := range appEnv.Envs {
		// Index-based loop so mutations hit the slice element in place.
		envVar := &appEnv.Envs[i]
		if envVar.ValueFrom != nil {
			var refValue string
			var refType string
			var refSource string

			// Check if both UserEnv and SystemEnv exist with the same name
			var userEnv *sysv1alpha1.UserEnv
			var sysEnv *sysv1alpha1.SystemEnv
			if userEnv = userEnvMap[envVar.ValueFrom.EnvName]; userEnv != nil {
				refValue = userEnv.GetEffectiveValue()
				refType = userEnv.Type
				refSource = "UserEnv"
			}
			if sysEnv = systemEnvMap[envVar.ValueFrom.EnvName]; sysEnv != nil {
				if userEnv != nil {
					// Both exist - this is unexpected, log a warning
					klog.Warningf("AppEnv %s/%s references environment variable %s which exists in both UserEnv and SystemEnv. UserEnv value will be used.",
						appEnv.Namespace, appEnv.Name, envVar.ValueFrom.EnvName)
				} else {
					refValue = sysEnv.GetEffectiveValue()
					refType = sysEnv.Type
					refSource = "SystemEnv"
				}
			}

			// do not check for non-empty value as an existing refed env may also contain empty value
			if userEnv != nil || sysEnv != nil {
				// Re-sync when value, type, or ref status drifted from the source.
				if envVar.Value != refValue || envVar.Type != refType || envVar.ValueFrom.Status != constants.EnvRefStatusSynced {
					envVar.Value = refValue
					envVar.Type = refType
					envVar.ValueFrom.Status = constants.EnvRefStatusSynced
					updated = true
					if envVar.ApplyOnChange {
						appEnv.NeedApply = true
					}
					klog.V(4).Infof("AppEnv %s/%s environment variable %s synced from %s with value: %s",
						appEnv.Namespace, appEnv.Name, envVar.ValueFrom.EnvName, refSource, refValue)
				}
			} else {
				// The referenced env exists nowhere: record that on the ref status.
				if envVar.ValueFrom.Status != constants.EnvRefStatusNotFound {
					envVar.ValueFrom.Status = constants.EnvRefStatusNotFound
					updated = true
				}
			}
		}
	}

	if updated {
		// Merge-patch only the diff against the pre-sync snapshot.
		if err := r.Patch(ctx, appEnv, client.MergeFrom(original)); err != nil {
			return fmt.Errorf("failed to update AppEnv %s/%s: %v", appEnv.Namespace, appEnv.Name, err)
		}
	}

	return nil
}
|
||||
|
||||
// triggerApplyEnv asks the app's ApplicationManager to run an ApplyEnv
// operation: it checks the operation is allowed in the manager's current
// state, patches Spec.OpType, moves the manager status to ApplyingEnv and
// publishes the corresponding app event. A disallowed state returns an error
// on purpose so the controller's backoff retries later.
func (r *AppEnvController) triggerApplyEnv(ctx context.Context, appEnv *sysv1alpha1.AppEnv) error {
	klog.Infof("Triggering ApplyEnv for app: %s owner: %s", appEnv.AppName, appEnv.AppOwner)

	appMgrName, err := apputils.FmtAppMgrName(appEnv.AppName, appEnv.AppOwner, appEnv.Namespace)
	if err != nil {
		return fmt.Errorf("failed to format app manager name: %v", err)
	}

	// ApplicationManager is cluster-scoped here (no namespace in the key).
	var targetAppMgr appv1alpha1.ApplicationManager
	if err := r.Get(ctx, types.NamespacedName{Name: appMgrName}, &targetAppMgr); err != nil {
		return fmt.Errorf("failed to get ApplicationManager %s: %v", appMgrName, err)
	}

	state := targetAppMgr.Status.State
	if !appstate.IsOperationAllowed(state, appv1alpha1.ApplyEnvOp) {
		// trigger backoff retry and this is the expected behaviour
		return fmt.Errorf("app %s is currently in state %s, applyEnv not allowed", appEnv.AppName, state)
	}

	appMgrCopy := targetAppMgr.DeepCopy()
	appMgrCopy.Spec.OpType = appv1alpha1.ApplyEnvOp

	if err := r.Patch(ctx, appMgrCopy, client.MergeFrom(&targetAppMgr)); err != nil {
		return fmt.Errorf("failed to update ApplicationManager Spec.OpType: %v", err)
	}

	// Coarse operation id: current unix time in seconds.
	now := metav1.Now()
	opID := strconv.FormatInt(time.Now().Unix(), 10)

	status := appv1alpha1.ApplicationManagerStatus{
		OpType:     appv1alpha1.ApplyEnvOp,
		State:      appv1alpha1.ApplyingEnv,
		OpID:       opID,
		Message:    "waiting for applying env",
		StatusTime: &now,
		UpdateTime: &now,
	}

	// Status is updated through the shared helper rather than r.Status().
	am, err := apputils.UpdateAppMgrStatus(targetAppMgr.Name, status)
	if err != nil {
		return fmt.Errorf("failed to update ApplicationManager Status: %v", err)
	}
	// Notify event listeners that an apply-env operation has started.
	utils.PublishAppEvent(utils.EventParams{
		Owner:      am.Spec.AppOwner,
		Name:       am.Spec.AppName,
		OpType:     string(am.Status.OpType),
		OpID:       opID,
		State:      appv1alpha1.ApplyingEnv.String(),
		RawAppName: am.Spec.RawAppName,
		Type:       "app",
		Title:      apputils.AppTitle(am.Spec.Config),
	})

	klog.Infof("Successfully triggered ApplyEnv for app: %s owner: %s", appEnv.AppName, appEnv.AppOwner)
	return nil
}
|
||||
|
||||
func (r *AppEnvController) clearSyncAnnotation(ctx context.Context, appEnv *sysv1alpha1.AppEnv) error {
|
||||
if appEnv.Annotations == nil || appEnv.Annotations[constants.AppEnvSyncAnnotation] == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
original := appEnv.DeepCopy()
|
||||
delete(appEnv.Annotations, constants.AppEnvSyncAnnotation)
|
||||
|
||||
klog.Infof("Clearing environment sync annotation from AppEnv %s/%s", appEnv.Namespace, appEnv.Name)
|
||||
return r.Patch(ctx, appEnv, client.MergeFrom(original))
|
||||
}
|
||||
|
||||
func (r *AppEnvController) markEnvApplied(ctx context.Context, appEnv *sysv1alpha1.AppEnv) error {
|
||||
if !appEnv.NeedApply {
|
||||
return nil
|
||||
}
|
||||
original := appEnv.DeepCopy()
|
||||
appEnv.NeedApply = false
|
||||
return r.Patch(ctx, appEnv, client.MergeFrom(original))
|
||||
}
|
||||
|
||||
// isLeaseActive returns true if now < RenewTime + LeaseDurationSeconds
|
||||
func isLeaseActive(l *coordinationv1.Lease) bool {
|
||||
if l == nil || l.Spec.RenewTime == nil || l.Spec.LeaseDurationSeconds == nil {
|
||||
return false
|
||||
}
|
||||
exp := l.Spec.RenewTime.Add(time.Duration(*l.Spec.LeaseDurationSeconds) * time.Second)
|
||||
return time.Now().Before(exp)
|
||||
}
|
||||
927
framework/app-service/controllers/application_controller.go
Normal file
927
framework/app-service/controllers/application_controller.go
Normal file
|
|
@ -0,0 +1,927 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/appcfg"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/generated/clientset/versioned"
|
||||
"bytetrade.io/web3os/app-service/pkg/helm"
|
||||
"bytetrade.io/web3os/app-service/pkg/kubesphere"
|
||||
"bytetrade.io/web3os/app-service/pkg/users/userspace"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
|
||||
"github.com/thoas/go-funk"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"helm.sh/helm/v3/pkg/storage/driver"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
const (
	// applicationFinalizer is the finalizer key placed on Application objects.
	// Presumably it gates cleanup before deletion — its consumer is outside
	// this view; confirm against the rest of the file.
	applicationFinalizer = "finalizers.bytetrade.io/application"
)

// protectedRelease lists helm releases whose helm history must never be
// cleared when their Application is deleted (checked in Reconcile).
var protectedRelease = []string{"headscale"}
|
||||
|
||||
// ApplicationReconciler reconciles a Application object
type ApplicationReconciler struct {
	client.Client
	// Scheme is the runtime scheme shared with the controller manager.
	Scheme *runtime.Scheme

	// AppClientset is the typed clientset for app.bytetrade.io resources.
	AppClientset *versioned.Clientset
	// Kubeconfig is the rest config used for direct cluster access.
	Kubeconfig *rest.Config
}
|
||||
|
||||
//+kubebuilder:rbac:groups=app.bytetrade.io,resources=applications,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=app.bytetrade.io,resources=applications/status,verbs=get;update;patch
|
||||
//+kubebuilder:rbac:groups=app.bytetrade.io,resources=applications/finalizers,verbs=update
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
||||
// move the current state of the cluster closer to the desired state.
|
||||
// TODO(user): Modify the Reconcile function to compare the state specified by
|
||||
// the Application object against the actual cluster state, and then
|
||||
// perform operations to make the cluster state reflect the state specified by
|
||||
// the user.
|
||||
//
|
||||
// For more details, check Reconcile and its Result here:
|
||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile
|
||||
func (r *ApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = log.FromContext(ctx)
|
||||
|
||||
ctrl.Log.Info("reconcile request", "name", req.Name, "namespace", req.Namespace)
|
||||
|
||||
if req.Namespace == "" {
|
||||
// ignore for-input object watch
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
validAppObjects := make(map[string]client.Object)
|
||||
deletingObjects := make(map[string]client.Object)
|
||||
|
||||
reqAppNames := strings.Split(req.Name, ",")
|
||||
for _, name := range reqAppNames {
|
||||
// init requested app object
|
||||
validAppObjects[name] = nil
|
||||
}
|
||||
|
||||
// get deployments installed by app installer
|
||||
findAppObject := func(list client.ObjectList) error {
|
||||
if err := r.List(ctx, list, client.InNamespace(req.Namespace)); err == nil {
|
||||
listObjects, err := apimeta.ExtractList(list)
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "extract list error", "name label", req.Name, "namespace", req.Namespace)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, o := range listObjects {
|
||||
d := o.(client.Object)
|
||||
if owner, ok := d.GetLabels()[constants.ApplicationOwnerLabel]; !ok || owner == "" {
|
||||
// ignore ownerless deployments
|
||||
continue
|
||||
}
|
||||
if middleware, ok := d.GetLabels()[constants.ApplicationMiddlewareLabel]; ok && middleware == "true" {
|
||||
continue
|
||||
}
|
||||
// for multi-app in one deployment/statefulset, we can not find only one object via
|
||||
// namespace and label filter, so have to filter in object list
|
||||
apps := getAppName(d)
|
||||
if len(apps) == 0 {
|
||||
continue
|
||||
}
|
||||
klog.Infof("apps: %v", apps)
|
||||
for _, name := range apps {
|
||||
// found a valid app object
|
||||
if d.GetDeletionTimestamp() == nil {
|
||||
validAppObjects[name] = d
|
||||
klog.Errorf("valid app name: %s", name)
|
||||
} else {
|
||||
deletingObjects[name] = d
|
||||
klog.Errorf("deleting app name: %s", name)
|
||||
} // end if deployment is deleted
|
||||
}
|
||||
|
||||
} // end loop deployment.Items
|
||||
} else {
|
||||
ctrl.Log.Error(err, "list deployments or statefulset error", "name label", req.Name, "namespace", req.Namespace)
|
||||
return err
|
||||
} // end if get deployments list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var deployemnts appsv1.DeploymentList
|
||||
err := findAppObject(&deployemnts)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// try to get statefulset
|
||||
var statefulsets appsv1.StatefulSetList
|
||||
err = findAppObject(&statefulsets)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
for name := range deletingObjects {
|
||||
if _, ok := validAppObjects[name]; !ok {
|
||||
validAppObjects[name] = nil
|
||||
}
|
||||
}
|
||||
|
||||
for name, validAppObject := range validAppObjects {
|
||||
app, err := r.AppClientset.AppV1alpha1().Applications().Get(ctx, fmtAppName(name, req.Namespace), metav1.GetOptions{})
|
||||
klog.Infof("get app err=%v, validateAPpis nil %v,app=%v", err, validAppObject == nil, fmtAppName(name, req.Namespace))
|
||||
if validAppObject != nil {
|
||||
// create or update application
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
// check if a new deployment created or not
|
||||
ctrl.Log.Info("create app from deployment watching", "name", validAppObject.GetName(), "namespace", validAppObject.GetNamespace(), "appname", name)
|
||||
err = r.createApplication(ctx, req, validAppObject, name)
|
||||
if err != nil {
|
||||
ctrl.Log.Info("create app failed", "app", name, "err", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
} // end if error
|
||||
|
||||
ctrl.Log.Info("Application update", "name", app.Name, "spec.name", app.Spec.Name, "spec.owner", app.Spec.Owner)
|
||||
err = r.updateApplication(ctx, req, validAppObject, app, name)
|
||||
if err != nil {
|
||||
return ctrl.Result{Requeue: true}, err
|
||||
}
|
||||
//}
|
||||
} else {
|
||||
// deployment or statefulset is nil, delete application
|
||||
if err == nil && app != nil {
|
||||
ctrl.Log.Info("Application delete", "name", app.Name, "spec.name", app.Spec.Name, "spec.owner", app.Spec.Owner)
|
||||
err = r.Delete(ctx, app.DeepCopy())
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if funk.Contains(protectedRelease, app.Spec.Name) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
err = r.clearHelmHistory(app.Spec.Name, app.Spec.Namespace)
|
||||
if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) {
|
||||
return ctrl.Result{RequeueAfter: 2 * time.Second}, err
|
||||
}
|
||||
|
||||
} else if apierrors.IsNotFound(err) {
|
||||
// app not found, just return
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
c, err := ctrl.NewControllerManagedBy(mgr).
|
||||
For(&appv1alpha1.Application{}).
|
||||
Build(r)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// watch the application enqueue formarted request
|
||||
err = c.Watch(source.Kind(
|
||||
mgr.GetCache(),
|
||||
&appv1alpha1.Application{},
|
||||
handler.TypedEnqueueRequestsFromMapFunc(
|
||||
func(ctx context.Context, app *appv1alpha1.Application) []reconcile.Request {
|
||||
return []reconcile.Request{{NamespacedName: types.NamespacedName{
|
||||
Name: app.Spec.Name,
|
||||
Namespace: app.Spec.Namespace}},
|
||||
}
|
||||
}),
|
||||
))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
watches := []client.Object{
|
||||
&appsv1.Deployment{},
|
||||
&appsv1.StatefulSet{},
|
||||
}
|
||||
|
||||
// watch the object installed by app-installer
|
||||
for _, w := range watches {
|
||||
if err = r.addWatch(c, mgr.GetCache(), w); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) addWatch(c controller.Controller, cache cache.Cache, watchedObject client.Object) error {
|
||||
return c.Watch(source.Kind(
|
||||
cache,
|
||||
watchedObject,
|
||||
handler.EnqueueRequestsFromMapFunc(
|
||||
func(ctx context.Context, h client.Object) []reconcile.Request {
|
||||
appNames := getAppName(h)
|
||||
return []reconcile.Request{{NamespacedName: types.NamespacedName{
|
||||
Name: strings.Join(appNames, ","),
|
||||
Namespace: h.GetNamespace()}}}
|
||||
}),
|
||||
predicate.Funcs{
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
return isApp(e.ObjectNew, e.ObjectOld)
|
||||
},
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
return isApp(e.Object)
|
||||
},
|
||||
DeleteFunc: func(e event.DeleteEvent) bool {
|
||||
return isApp(e.Object)
|
||||
},
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
// TODO: get application other spec info
|
||||
// TODO: make sure entrance service is applied
|
||||
func (r *ApplicationReconciler) createApplication(ctx context.Context, req ctrl.Request,
|
||||
deployment client.Object, name string) error {
|
||||
owner := deployment.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
appNames := getAppName(deployment)
|
||||
isMultiApp := len(appNames) > 1
|
||||
icon := getAppIcon(deployment)
|
||||
entrancesMap, err := r.getEntranceServiceAddress(ctx, deployment, isMultiApp)
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "get entrance error")
|
||||
}
|
||||
servicePortsMap, err := r.getAppPorts(ctx, deployment, isMultiApp)
|
||||
if err != nil {
|
||||
klog.Warningf("get app ports err=%v", err)
|
||||
}
|
||||
tailScale, err := r.getAppTailScale(deployment)
|
||||
if err != nil {
|
||||
klog.Warningf("get app tailscale acls err=%v", err)
|
||||
}
|
||||
|
||||
var appid string
|
||||
var isSysApp bool
|
||||
if userspace.IsSysApp(name) {
|
||||
appid = name
|
||||
isSysApp = true
|
||||
} else {
|
||||
appid = appv1alpha1.AppName(name).GetAppID()
|
||||
}
|
||||
settings, sharedEntrances := r.getAppSettings(ctx, name, appid, owner, deployment, isMultiApp, entrancesMap[name])
|
||||
|
||||
rawAppName := name
|
||||
if deployment.GetLabels()[constants.ApplicationRawAppNameLabel] != "" {
|
||||
rawAppName = deployment.GetLabels()[constants.ApplicationRawAppNameLabel]
|
||||
}
|
||||
// create the application cr
|
||||
newapp := &appv1alpha1.Application{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmtAppName(name, req.Namespace),
|
||||
},
|
||||
Spec: appv1alpha1.ApplicationSpec{
|
||||
Name: name,
|
||||
RawAppName: rawAppName,
|
||||
Appid: appid,
|
||||
IsSysApp: isSysApp,
|
||||
Namespace: req.Namespace,
|
||||
Owner: owner, // get from deployment
|
||||
DeploymentName: deployment.GetName(),
|
||||
Entrances: entrancesMap[name],
|
||||
SharedEntrances: sharedEntrances,
|
||||
Ports: servicePortsMap[name],
|
||||
Icon: icon[name],
|
||||
Settings: settings,
|
||||
},
|
||||
}
|
||||
if tailScale != nil {
|
||||
newapp.Spec.TailScale = *tailScale
|
||||
}
|
||||
app, err := r.AppClientset.AppV1alpha1().Applications().Create(ctx, newapp, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "create application error")
|
||||
}
|
||||
now := metav1.Now()
|
||||
appCopy := app.DeepCopy()
|
||||
if userspace.IsSysApp(app.Spec.Name) {
|
||||
err = apputils.CreateSysAppMgr(app.Spec.Name, app.Spec.Owner)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to create applicationmanagers for system app=%s err=%v", app.Spec.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
app.Status.StatusTime = &now
|
||||
app.Status.UpdateTime = &now
|
||||
app.Status.State = appv1alpha1.AppNotReady.String()
|
||||
|
||||
entranceStatues := make([]appv1alpha1.EntranceStatus, 0, len(app.Spec.Entrances))
|
||||
|
||||
for _, e := range app.Spec.Entrances {
|
||||
if e.Skip {
|
||||
continue
|
||||
}
|
||||
state := appv1alpha1.EntranceNotReady
|
||||
if userspace.IsSysApp(app.Spec.Name) {
|
||||
state = appv1alpha1.EntranceRunning
|
||||
}
|
||||
entranceStatues = append(entranceStatues, appv1alpha1.EntranceStatus{
|
||||
Name: e.Name,
|
||||
State: state,
|
||||
StatusTime: &now,
|
||||
Reason: state.String(),
|
||||
})
|
||||
}
|
||||
app.Status.EntranceStatuses = entranceStatues
|
||||
|
||||
err = r.Status().Patch(ctx, app, client.MergeFrom(appCopy))
|
||||
if err != nil {
|
||||
klog.Infof("Failed to patch err=%v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) updateApplication(ctx context.Context, req ctrl.Request,
|
||||
deployment client.Object, app *appv1alpha1.Application, name string) error {
|
||||
appCopy := app.DeepCopy()
|
||||
|
||||
tailScale, err := r.getAppTailScale(deployment)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get tailscale err=%v", err)
|
||||
}
|
||||
|
||||
owner := deployment.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
klog.Infof("in updateApplication ....appname: %v", app.Spec.Name)
|
||||
icons := getAppIcon(deployment)
|
||||
var icon string
|
||||
|
||||
icon = icons[name]
|
||||
|
||||
appCopy.Spec.Name = name
|
||||
appCopy.Spec.Namespace = deployment.GetNamespace()
|
||||
appCopy.Spec.Owner = owner
|
||||
appCopy.Spec.DeploymentName = deployment.GetName()
|
||||
appCopy.Spec.Icon = icon
|
||||
if tailScale != nil {
|
||||
appCopy.Spec.TailScale = *tailScale
|
||||
}
|
||||
|
||||
actionConfig, _, err := helm.InitConfig(r.Kubeconfig, appCopy.Spec.Namespace)
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "init helm config error")
|
||||
}
|
||||
|
||||
if !userspace.IsSysApp(app.Spec.Name) {
|
||||
version, _, err := apputils.GetDeployedReleaseVersion(actionConfig, name)
|
||||
if err != nil && !errors.Is(err, driver.ErrReleaseNotFound) {
|
||||
ctrl.Log.Error(err, "get deployed release version error")
|
||||
}
|
||||
if err == nil {
|
||||
appCopy.Spec.Settings["version"] = version
|
||||
}
|
||||
}
|
||||
|
||||
err = r.Patch(ctx, appCopy, client.MergeFrom(app))
|
||||
if err != nil {
|
||||
klog.Infof("update spec failed %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
klog.Infof("appCopy.Status: %v", appCopy.Status)
|
||||
newAppState := r.calAppState(&appCopy.Status)
|
||||
klog.Infof("application controller newAppState: %v", newAppState)
|
||||
klog.Infof("application controller oldAppState: %v", appCopy.Status.State)
|
||||
|
||||
if appCopy.Status.State != newAppState {
|
||||
klog.Infof("set appCopy.State:.......new: %v", newAppState)
|
||||
appCopy.Status.State = newAppState
|
||||
now := metav1.Now()
|
||||
appCopy.Status.LastTransitionTime = &now
|
||||
|
||||
err = r.Status().Patch(ctx, appCopy, client.MergeFrom(app))
|
||||
if err != nil {
|
||||
klog.Infof("update xxx error: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// merge settings
|
||||
//for k, v := range settings {
|
||||
// if setting, ok := appCopy.Spec.Settings[k]; !ok || setting != v {
|
||||
// appCopy.Spec.Settings[k] = v
|
||||
// }
|
||||
//}
|
||||
|
||||
//var a appv1alpha1.Application
|
||||
//err = r.Get(ctx, types.NamespacedName{Name: app.Name}, &a)
|
||||
//if err != nil {
|
||||
// klog.Infof("get app failed %v", err)
|
||||
// return err
|
||||
//}
|
||||
//klog.Infof("appState: ..%v", a.Status.State)
|
||||
return err
|
||||
}
|
||||
|
||||
// getEntranceServiceAddress decodes the entrances annotation of the workload
// into a map of app name -> entrances, applies defaults (auth level, open
// method), and verifies that each entrance's backing Service exists in the
// workload's namespace and exposes the declared port. For a multi-app
// workload the annotation is a JSON map keyed by app name; otherwise it is a
// plain JSON list attributed to the app named by the workload's labels.
func (r *ApplicationReconciler) getEntranceServiceAddress(ctx context.Context, deployment client.Object, isMultiApp bool) (map[string][]appv1alpha1.Entrance, error) {
	entrancesLabel := deployment.GetAnnotations()[constants.ApplicationEntrancesKey]
	entrancesMap := make(map[string][]appv1alpha1.Entrance)

	if len(entrancesLabel) == 0 {
		return entrancesMap, errors.New("invalid service address label")
	}
	klog.Infof("isMultiApp: %v", isMultiApp)
	var err error
	if isMultiApp {
		err = json.Unmarshal([]byte(entrancesLabel), &entrancesMap)
		if err != nil {
			klog.Infof("unmarshalMAp error=%v", err)
			return nil, err
		}
	} else {
		appName := deployment.GetLabels()[constants.ApplicationNameLabel]
		entrances := make([]appv1alpha1.Entrance, 0)
		err = json.Unmarshal([]byte(entrancesLabel), &entrances)
		if err != nil {
			klog.Infof("unmarshal error=%v", err)
			return nil, err
		}
		entrancesMap[appName] = entrances
	}

	// set default value and check if service exists
	for _, entrances := range entrancesMap {
		for i, e := range entrances {
			// Mutate through the index so defaults stick in the slice.
			if e.AuthLevel == "" {
				entrances[i].AuthLevel = constants.AuthorizationLevelOfPrivate
			}
			if e.OpenMethod == "" {
				entrances[i].OpenMethod = "default"
			}
			// e.Host names a Service in the workload's namespace; it must
			// exist and expose e.Port, otherwise the whole lookup fails.
			objectKey := types.NamespacedName{Namespace: deployment.GetNamespace(), Name: e.Host}
			var svc corev1.Service
			if err = r.Get(ctx, objectKey, &svc); err == nil {
				if !checkPortOfService(&svc, e.Port) {
					return nil, fmt.Errorf("entrance: %s not found", e.Host)
				}
			} else {
				return nil, err
			}
		}
	}
	return entrancesMap, nil
}
|
||||
|
||||
// getAppSettings assembles the Application CR's settings map for the app
// backed by the given workload, plus any shared entrances for cluster-scoped
// apps. Common keys (source, title, version, target, clusterScoped,
// requiredGPU, default third-level domain config) are read from the
// workload's labels/annotations; the access policy, mobile support and OIDC
// keys are derived from the installation config for user apps, or from the
// policies annotation for system apps.
func (r *ApplicationReconciler) getAppSettings(ctx context.Context, appName, appId, owner string, deployment client.Object,
	isMulti bool, entrances []appv1alpha1.Entrance) (settings map[string]string, sharedEntrances []appv1alpha1.Entrance) {
	settings = make(map[string]string)
	settings["source"] = api.Unknown.String()
	// The raw app name label, when present, is the name the sys-app check
	// below is performed against.
	rawAppName := appName
	if deployment.GetLabels()[constants.ApplicationRawAppNameLabel] != "" {
		rawAppName = deployment.GetLabels()[constants.ApplicationRawAppNameLabel]
	}

	if chartSource, ok := deployment.GetAnnotations()[constants.ApplicationSourceLabel]; ok {
		settings["source"] = chartSource
	}

	if systemService, ok := deployment.GetLabels()[constants.ApplicationSystemServiceLabel]; ok {
		settings["system_service"] = systemService
	}

	titles := getAppTitle(deployment)
	settings["title"] = titles[appName]

	if target, ok := deployment.GetLabels()[constants.ApplicationTargetLabel]; ok {
		settings["target"] = target
	}

	versions := getAppVersion(deployment)
	settings["version"] = versions[appName]

	// clusterScoped defaults to false; it is flipped below only when the
	// user-app installation config says so.
	settings["clusterScoped"] = "false"
	settings["requiredGPU"] = deployment.GetAnnotations()[constants.ApplicationRequiredGPU]
	//clusterScoped, ok := deployment.GetAnnotations()[constants.ApplicationClusterScoped]
	//if ok && clusterScoped == "true" {
	//	settings["clusterScoped"] = "true"
	//}

	// The default third-level domain annotation lists configs for every app
	// on the workload; keep only the ones for this app.
	if defaultDomainAnnotation, ok := deployment.GetAnnotations()[constants.ApplicationDefaultThirdLevelDomain]; ok {
		var allDomainConfigs []appv1alpha1.DefaultThirdLevelDomainConfig
		err := json.Unmarshal([]byte(defaultDomainAnnotation), &allDomainConfigs)
		if err != nil {
			klog.Errorf("Failed to unmarshal default domain annotation err=%v", err)
		} else {
			var appDomainConfigs []appv1alpha1.DefaultThirdLevelDomainConfig
			for _, config := range allDomainConfigs {
				if config.AppName == appName {
					appDomainConfigs = append(appDomainConfigs, config)
				}
			}

			if len(appDomainConfigs) > 0 {
				domainConfigBytes, err := json.Marshal(appDomainConfigs)
				if err != nil {
					klog.Errorf("Failed to marshal domain configs err=%v", err)
				} else {
					settings["defaultThirdLevelDomainConfig"] = string(domainConfigBytes)
				}
			}
		}
	}

	// not sys applications.
	if !userspace.IsSysApp(rawAppName) {
		if appCfg, err := appcfg.GetAppInstallationConfig(appName, owner); err != nil {
			klog.Infof("Failed to get app configuration appName=%s owner=%s err=%v", appName, owner, err)
		} else {
			policyStr, err := getApplicationPolicy(appCfg.Policies, appCfg.Entrances)
			if err != nil {
				klog.Errorf("Failed to encode json err=%v", err)
			} else if len(policyStr) > 0 {
				settings[applicationSettingsPolicyKey] = policyStr
			}

			// set cluster-scoped info to settings
			if appCfg.AppScope.ClusterScoped {
				settings["clusterScoped"] = "true"
				if len(appCfg.AppScope.AppRef) > 0 {
					settings["clusterAppRef"] = strings.Join(appCfg.AppScope.AppRef, ",")
				}

				sharedEntrances = appCfg.SharedEntrances
			}
			if appCfg.MobileSupported {
				settings["mobileSupported"] = "true"
			} else {
				settings["mobileSupported"] = "false"
			}

			if appCfg.OIDC.Enabled {
				// get oidc client id and secret created at installing
				var secret corev1.Secret
				err = r.Get(ctx,
					types.NamespacedName{Namespace: deployment.GetNamespace(), Name: constants.OIDCSecret},
					&secret)
				if err != nil {
					klog.Errorf("Failed to get app's oidc secret err=%v, app=%s, namespace=%s", err, appName, deployment.GetNamespace())
				} else {
					settings["oidc.client.id"] = string(secret.Data["id"])

					// The stored secret is re-encrypted before being exposed
					// through settings.
					encryptSecret, err := utils.Pbkdf2Crypto(string(secret.Data["secret"]))
					if err != nil {
						klog.Error("encrypt secret error, ", err)
					}
					settings["oidc.client.secret"] = encryptSecret

					zone, err := kubesphere.GetUserZone(ctx, owner)
					if err != nil {
						klog.Error("get user zone error, ", err)
					} else {

						// The redirect URI embeds the entrance index only
						// when the app has more than one entrance.
						multiEntrance := len(appCfg.Entrances) > 1
						for i, e := range appCfg.Entrances {
							if e.Name == appCfg.OIDC.EntranceName {
								var appUrl string
								if multiEntrance {
									appUrl = fmt.Sprintf("https://%s%d.%s%s", appId, i, zone, appCfg.OIDC.RedirectUri)
								} else {
									appUrl = fmt.Sprintf("https://%s.%s%s", appId, zone, appCfg.OIDC.RedirectUri)
								}
								settings["oidc.client.redirect_uri"] = appUrl
							}
						}

					} // end of if get zone
				} // end of if get secret
			}
		}
	} else {
		// sys applications.
		type Policies struct {
			Policies []appcfg.Policy `json:"policies"`
		}
		applicationPoliciesFromAnnotation, ok := deployment.GetAnnotations()[constants.ApplicationPolicies]

		// For multi-app workloads the annotation is a map of app name ->
		// policies; otherwise it is a single Policies document.
		var policy Policies
		if ok {
			if isMulti {
				m := make(map[string]Policies)
				err := json.Unmarshal([]byte(applicationPoliciesFromAnnotation), &m)
				if err != nil {
					klog.Errorf("Failed to unmarshal applicationPoliciesFromAnnotation err=%v", err)
				}
				policy = m[appName]
			} else {
				err := json.Unmarshal([]byte(applicationPoliciesFromAnnotation), &policy)
				if err != nil {
					klog.Errorf("Failed to unmarshal applicationPoliciesFromAnnotation err=%v", err)
				}
			}
		}
		klog.Infof("applicationPoliciesFromAnnotation: %s", applicationPoliciesFromAnnotation)
		klog.Infof("policy: %#v", policy)

		// transform from Policy to AppPolicy
		var appPolicies []appcfg.AppPolicy
		for _, p := range policy.Policies {
			d, _ := time.ParseDuration(p.Duration)
			appPolicies = append(appPolicies, appcfg.AppPolicy{
				EntranceName: p.EntranceName,
				URIRegex:     p.URIRegex,
				Level:        p.Level,
				OneTime:      p.OneTime,
				Duration:     d,
			})
		}
		policyStr, err := getApplicationPolicy(appPolicies, entrances)
		if err != nil {
			klog.Errorf("Failed to encode json err=%v", err)
		} else if len(policyStr) > 0 {
			settings[applicationSettingsPolicyKey] = policyStr
		}
		settings["source"] = api.System.String()
		mobileSupported, ok := deployment.GetAnnotations()[constants.ApplicationMobileSupported]
		settings["mobileSupported"] = "false"
		if ok {
			settings["mobileSupported"] = mobileSupported
		}
	}

	return
}
|
||||
|
||||
func (r *ApplicationReconciler) clearHelmHistory(appname, namespace string) error {
|
||||
actionConfig, _, err := helm.InitConfig(r.Kubeconfig, namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
klog.Infof("clearHelmHistory: appname:%s, namespace:%s", appname, namespace)
|
||||
|
||||
histClient := action.NewHistory(actionConfig)
|
||||
histClient.Max = 1
|
||||
_, err = histClient.Run(appname)
|
||||
klog.Infof("appname in clearHelmHistory: %v", appname)
|
||||
klog.Infof("err in clearHelmHistory: err=%v", err)
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, driver.ErrReleaseNotFound) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return helm.UninstallCharts(actionConfig, appname)
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) getAppPorts(ctx context.Context, deployment client.Object, isMultiApp bool) (map[string][]appv1alpha1.ServicePort, error) {
|
||||
portsLabel := deployment.GetAnnotations()[constants.ApplicationPortsKey]
|
||||
portsMap := make(map[string][]appv1alpha1.ServicePort)
|
||||
if len(portsLabel) == 0 {
|
||||
return portsMap, errors.New("invalid service port")
|
||||
}
|
||||
var err error
|
||||
if isMultiApp {
|
||||
err = json.Unmarshal([]byte(portsLabel), &portsMap)
|
||||
if err != nil {
|
||||
klog.Errorf("unmarshal portMap err=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
appName := deployment.GetLabels()[constants.ApplicationNameLabel]
|
||||
ports := make([]appv1alpha1.ServicePort, 0)
|
||||
err = json.Unmarshal([]byte(portsLabel), &ports)
|
||||
if err != nil {
|
||||
klog.Errorf("unmarshal service port error=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
portsMap[appName] = ports
|
||||
}
|
||||
return portsMap, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) getAppTailScale(deployment client.Object) (*appv1alpha1.TailScale, error) {
|
||||
tailScale := appv1alpha1.TailScale{}
|
||||
tailScaleString := deployment.GetAnnotations()[constants.ApplicationTailScaleKey]
|
||||
if len(tailScaleString) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
err := json.Unmarshal([]byte(tailScaleString), &tailScale)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tailScale, nil
|
||||
}
|
||||
|
||||
func (r *ApplicationReconciler) calAppState(status *appv1alpha1.ApplicationStatus) string {
|
||||
entranceLen := len(status.EntranceStatuses)
|
||||
klog.Infof("entranceLen: %v", entranceLen)
|
||||
if entranceLen == 0 {
|
||||
return "running"
|
||||
}
|
||||
for _, es := range status.EntranceStatuses {
|
||||
if es.State == appv1alpha1.EntranceStopped {
|
||||
return "stopped"
|
||||
}
|
||||
if es.State == appv1alpha1.EntranceNotReady {
|
||||
return "notReady"
|
||||
}
|
||||
}
|
||||
return "running"
|
||||
}
|
||||
|
||||
func checkPortOfService(s *corev1.Service, port int32) bool {
|
||||
for _, p := range s.Spec.Ports {
|
||||
if p.Port == port {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// fmtAppName returns the resource name used for an Application CR, derived
// from the app name and its namespace via appv1alpha1.AppResourceName.
func fmtAppName(name, namespace string) string {
	return appv1alpha1.AppResourceName(name, namespace)
}
|
||||
|
||||
func isApp(obs ...metav1.Object) bool {
|
||||
for _, o := range obs {
|
||||
|
||||
if o.GetLabels() == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, ok := o.GetLabels()[constants.ApplicationNameLabel]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isWorkflow(obs ...metav1.Object) bool {
|
||||
for _, o := range obs {
|
||||
|
||||
if o.GetLabels() == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, ok := o.GetLabels()[constants.WorkflowNameLabel]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getApplicationPolicy(policies []appcfg.AppPolicy, entrances []appv1alpha1.Entrance) (string, error) {
|
||||
subPolicy := make(map[string][]*applicationSettingsSubPolicy)
|
||||
|
||||
for _, p := range policies {
|
||||
subPolicy[p.EntranceName] = append(subPolicy[p.EntranceName],
|
||||
&applicationSettingsSubPolicy{
|
||||
URI: p.URIRegex,
|
||||
Policy: p.Level,
|
||||
OneTime: p.OneTime,
|
||||
Duration: int32(p.Duration / time.Second),
|
||||
})
|
||||
}
|
||||
|
||||
policy := make(map[string]applicationSettingsPolicy)
|
||||
for _, e := range entrances {
|
||||
defaultPolicy := "system"
|
||||
sp := subPolicy[e.Name]
|
||||
if e.AuthLevel == constants.AuthorizationLevelOfPublic {
|
||||
defaultPolicy = constants.AuthorizationLevelOfPublic
|
||||
}
|
||||
policy[e.Name] = applicationSettingsPolicy{
|
||||
DefaultPolicy: defaultPolicy,
|
||||
OneTime: false,
|
||||
Duration: 0,
|
||||
SubPolicies: sp,
|
||||
}
|
||||
}
|
||||
|
||||
policyStr, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(policyStr), nil
|
||||
}
|
||||
|
||||
func getEntranceFromAnnotations(deployment client.Object) ([]appv1alpha1.Entrance, error) {
|
||||
entrancesLabel := deployment.GetAnnotations()[constants.ApplicationEntrancesKey]
|
||||
entrances := make([]appv1alpha1.Entrance, 0)
|
||||
|
||||
if len(entrancesLabel) == 0 {
|
||||
return entrances, errors.New("invalid service address label")
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(entrancesLabel), &entrances); err != nil {
|
||||
return entrances, err
|
||||
}
|
||||
for i, e := range entrances {
|
||||
if e.OpenMethod == "" {
|
||||
entrances[i].OpenMethod = "default"
|
||||
}
|
||||
}
|
||||
|
||||
return entrances, nil
|
||||
}
|
||||
|
||||
func getAppName(deployment client.Object) []string {
|
||||
names := make([]string, 0)
|
||||
isMultiApp := deployment.GetLabels()[constants.ApplicationAppGroupLabel] == "true"
|
||||
if isMultiApp {
|
||||
apps := make(map[string]interface{})
|
||||
keys := deployment.GetAnnotations()[constants.ApplicationEntrancesKey]
|
||||
if keys == "" {
|
||||
klog.Infof("Application entrances label is empty")
|
||||
return nil
|
||||
}
|
||||
// multi-app in one deployment/statefulset, get all app names
|
||||
err := json.Unmarshal([]byte(keys), &apps)
|
||||
if err != nil {
|
||||
klog.Infof("Failed to unmarshal application entrances label err=%v", err)
|
||||
return nil
|
||||
}
|
||||
for k := range apps {
|
||||
names = append(names, k)
|
||||
}
|
||||
return names
|
||||
}
|
||||
name := deployment.GetLabels()[constants.ApplicationNameLabel]
|
||||
if name == "" {
|
||||
return nil
|
||||
}
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
func getAppIcon(deployment client.Object) map[string]string {
|
||||
ret := make(map[string]string)
|
||||
if deployment.GetLabels()[constants.ApplicationAppGroupLabel] == "true" {
|
||||
err := json.Unmarshal([]byte(deployment.GetAnnotations()[constants.ApplicationIconLabel]), &ret)
|
||||
if err != nil {
|
||||
klog.Infof("Failed to unmarshal application icon label err=%v", err)
|
||||
}
|
||||
} else {
|
||||
ret[deployment.GetLabels()[constants.ApplicationNameLabel]] = deployment.GetAnnotations()[constants.ApplicationIconLabel]
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func getAppVersion(deployment client.Object) map[string]string {
|
||||
ret := make(map[string]string)
|
||||
if deployment.GetLabels()[constants.ApplicationAppGroupLabel] == "true" {
|
||||
err := json.Unmarshal([]byte(deployment.GetAnnotations()[constants.ApplicationVersionLabel]), &ret)
|
||||
if err != nil {
|
||||
klog.Infof("Failed to unmarshal application icon label err=%v", err)
|
||||
}
|
||||
} else {
|
||||
ret[deployment.GetLabels()[constants.ApplicationNameLabel]] = deployment.GetAnnotations()[constants.ApplicationVersionLabel]
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func getAppTitle(deployment client.Object) map[string]string {
|
||||
ret := make(map[string]string)
|
||||
if deployment.GetLabels()[constants.ApplicationAppGroupLabel] == "true" {
|
||||
err := json.Unmarshal([]byte(deployment.GetAnnotations()[constants.ApplicationTitleLabel]), &ret)
|
||||
if err != nil {
|
||||
klog.Infof("Failed to unmarshal application icon label err=%v", err)
|
||||
}
|
||||
} else {
|
||||
ret[deployment.GetLabels()[constants.ApplicationNameLabel]] = deployment.GetAnnotations()[constants.ApplicationTitleLabel]
|
||||
}
|
||||
return ret
|
||||
}
|
||||
222
framework/app-service/controllers/appmgr_controller.go
Normal file
222
framework/app-service/controllers/appmgr_controller.go
Normal file
|
|
@ -0,0 +1,222 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/images"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// ApplicationManagerController represents a controller for managing the lifecycle of applicationmanager.
type ApplicationManagerController struct {
	client.Client
	// KubeConfig is the cluster rest config (consumers not visible in this file).
	KubeConfig *rest.Config
	// ImageClient handles app image operations (see images.ImageManager).
	ImageClient images.ImageManager
}
|
||||
|
||||
// SetupWithManager sets up the ApplicationManagerController with the provided
// controller manager: a single-worker controller watching ApplicationManager
// objects (create/update events gated by the preEnqueue checks), plus a
// background loop that periodically reconciles every manager.
func (r *ApplicationManagerController) SetupWithManager(mgr ctrl.Manager) error {
	// Single worker: application manager operations are processed serially.
	c, err := controller.New("app-manager-controller", mgr, controller.Options{
		MaxConcurrentReconciles: 1,
		Reconciler:              r,
	})
	if err != nil {
		return fmt.Errorf("app manager setup failed %w", err)
	}

	// Enqueue by object name only (no namespace in the request key).
	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&appv1alpha1.ApplicationManager{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, h *appv1alpha1.ApplicationManager) []reconcile.Request {
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name: h.GetName(),
				}}}
			}),
		predicate.TypedFuncs[*appv1alpha1.ApplicationManager]{
			CreateFunc: func(e event.TypedCreateEvent[*appv1alpha1.ApplicationManager]) bool {
				return r.preEnqueueCheckForCreate(e.Object)
			},
			UpdateFunc: func(e event.TypedUpdateEvent[*appv1alpha1.ApplicationManager]) bool {
				return r.preEnqueueCheckForUpdate(e.ObjectOld, e.ObjectNew)
			},
			DeleteFunc: func(e event.TypedDeleteEvent[*appv1alpha1.ApplicationManager]) bool {
				return true
			},
		},
	))

	if err != nil {
		return fmt.Errorf("add watch failed %w", err)
	}

	// start auto reconcile the application manager state
	go wait.Until(r.ReconcileAll, 2*time.Minute, wait.NeverStop)

	return nil
}
|
||||
|
||||
func (r *ApplicationManagerController) ReconcileAll() {
|
||||
ctx, cancal := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancal()
|
||||
|
||||
var appManagerList appv1alpha1.ApplicationManagerList
|
||||
err := r.List(ctx, &appManagerList)
|
||||
if err != nil {
|
||||
klog.Errorf("list application manager failed %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, appmgr := range appManagerList.Items {
|
||||
if appmgr.Spec.Type != appv1alpha1.App && appmgr.Spec.Type != appv1alpha1.Middleware {
|
||||
continue
|
||||
}
|
||||
_, err := r.Reconcile(ctx, ctrl.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: appmgr.Name,
|
||||
}})
|
||||
if err != nil {
|
||||
klog.Error("reconcile application manager error, ", err, ", ", appmgr.Name)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
} // end of app mgr list loop
|
||||
}
|
||||
|
||||
// Reconcile implements the reconciliation loop for the ApplicationManagerController.
// It loads the app's state machine representation; when the state carries a
// pending operation it cancels it on timeout or executes it, asynchronously
// polling any in-progress result. Load failures are re-enqueued after 2s.
func (r *ApplicationManagerController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	klog.Infof("reconcile application manager request name=%s", req.Name)
	statefulApp, err := r.loadStatefulAppAndReconcile(ctx, req.Name)
	if err != nil {
		klog.Errorf("load stateful app failed in reconcile %v", err)
		return ctrl.Result{RequeueAfter: 2 * time.Second}, err
	}

	// Only states that implement OperationApp have work to perform here.
	if operation, ok := statefulApp.(appstate.OperationApp); ok {
		klog.Info("stateful app is doing something, ", statefulApp.State())
		if operation.IsTimeout() {
			// The operation exceeded its deadline; cancel instead of executing.
			klog.Errorf("stateful app is timeout: %v, state:%v", req.Name, statefulApp.State())
			err := operation.Cancel(ctx)
			if err != nil {
				klog.Info("cancel stateful app operation error, ", err, ", ", statefulApp.GetManager().Name)
			}

			return ctrl.Result{}, err
		}

		inProgress, err := operation.Exec(ctx)
		if err != nil {
			klog.Error("execute stateful app operation error, ", err, ", ", statefulApp.GetManager().Name, ", ", statefulApp.State())

			if waiting, ok := err.(appstate.RequeueError); ok {
				// if the error is a requeue error, we should requeue the request
				return ctrl.Result{RequeueAfter: waiting.RequeueAfter()}, nil
			}
		}

		if inProgress != nil {
			if pollable, ok := inProgress.(appstate.PollableStatefulInProgressApp); ok {
				// use background context to wait for the operation to finish
				// current context `ctx` controlled by the app mgr controller
				c := pollable.CreatePollContext()
				pollable.WaitAsync(c)
			}
		}

		return ctrl.Result{}, err
	}

	// No pending operation: just report the current state.
	var state string
	if statefulApp != nil {
		state = statefulApp.State()
	}

	klog.Infof("reconciled application manager request name=%s state=%s", req.Name, state)

	return ctrl.Result{}, nil
}
|
||||
|
||||
func (r *ApplicationManagerController) preEnqueueCheckForCreate(obj client.Object) bool {
|
||||
am, _ := obj.(*appv1alpha1.ApplicationManager)
|
||||
if am.Spec.Type != appv1alpha1.App && am.Spec.Type != appv1alpha1.Middleware {
|
||||
return false
|
||||
}
|
||||
return am.Status.State != ""
|
||||
}
|
||||
|
||||
func (r *ApplicationManagerController) preEnqueueCheckForUpdate(old, new client.Object) bool {
|
||||
oldAppMgr, _ := old.(*appv1alpha1.ApplicationManager)
|
||||
curAppMgr, _ := new.(*appv1alpha1.ApplicationManager)
|
||||
|
||||
if curAppMgr.Spec.Type != appv1alpha1.App && curAppMgr.Spec.Type != appv1alpha1.Middleware {
|
||||
return false
|
||||
}
|
||||
if curAppMgr.Status.OpGeneration <= oldAppMgr.Status.OpGeneration {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// loadStatefulAppAndReconcile loads the state machine representation of an
// application manager. When loading fails with an unknown-state error that
// provides a reconcile function, that function is run and, on success, the
// (possibly nil) statefulApp is returned; an unknown in-progress app has its
// resources cleaned up before the error is propagated so controller-runtime
// re-enqueues the request.
// NOTE(review): the error returned by LoadStatefulApp exposes StateReconcile/
// CleanUp methods, so it must be a concrete project error type — its exact
// contract is not visible in this file.
func (r *ApplicationManagerController) loadStatefulAppAndReconcile(ctx context.Context, name string) (appstate.StatefulApp, error) {
	statefulApp, err := LoadStatefulApp(ctx, r, name)
	if err != nil {
		klog.Errorf("load stateful app failed %v", err)

		switch {
		case appstate.IsUnknownState(err):
			if srfunc := err.StateReconcile(); srfunc != nil {
				err := srfunc(ctx)
				if err != nil {
					klog.Errorf("reconcile stateful app failed %v", err)
					return nil, err
				}
				return statefulApp, nil
			}
		case appstate.IsUnknownInProgressApp(err):
			// this is a special case, the app is in progress but the state is unknown
			err.CleanUp(ctx)
		}

		// return error to the controller-runtime, and re-enqueue the request
		return nil, err
	}

	return statefulApp, nil
}
|
||||
|
||||
// updateStatus moves the ApplicationManager to the given state: it refreshes
// the object from the API server, then patches the state, the status/update
// timestamps, and bumps OpGeneration by one.
// NOTE(review): a failed refresh is swallowed (returns nil) — presumably
// intended for objects deleted mid-flight; confirm that callers rely on this.
func (r *ApplicationManagerController) updateStatus(ctx context.Context, am *appv1alpha1.ApplicationManager, state appv1alpha1.ApplicationManagerState) error {
	err := r.Get(ctx, types.NamespacedName{Name: am.Name}, am)
	if err != nil {
		klog.Errorf("get app manager %s failed %v", am.Name, err)
		return nil
	}
	now := metav1.Now()
	amCopy := am.DeepCopy()
	amCopy.Status.State = state
	amCopy.Status.StatusTime = &now
	amCopy.Status.UpdateTime = &now
	amCopy.Status.OpGeneration += 1
	err = r.Patch(ctx, amCopy, client.MergeFrom(am))
	if err != nil {
		klog.Errorf("update app manager %s status failed %v", am.Name, err)
		return err
	}
	return nil
}
|
||||
344
framework/app-service/controllers/appmgr_controller_test.go
Normal file
344
framework/app-service/controllers/appmgr_controller_test.go
Normal file
|
|
@ -0,0 +1,344 @@
|
|||
package controllers
|
||||
|
||||
//
|
||||
//import (
|
||||
// "bytetrade.io/web3os/app-service/pkg/appinstaller"
|
||||
// "bytetrade.io/web3os/app-service/pkg/kubesphere"
|
||||
// "context"
|
||||
// "encoding/json"
|
||||
// "github.com/agiledragon/gomonkey/v2"
|
||||
// . "github.com/onsi/ginkgo/v2"
|
||||
// . "github.com/onsi/gomega"
|
||||
// "k8s.io/apimachinery/pkg/types"
|
||||
// "k8s.io/client-go/kubernetes/scheme"
|
||||
// "k8s.io/client-go/rest"
|
||||
// ctrl "sigs.k8s.io/controller-runtime"
|
||||
// "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
// "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
// "sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
//
|
||||
// appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
// appsv1 "k8s.io/api/apps/v1"
|
||||
// corev1 "k8s.io/api/core/v1"
|
||||
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
// "k8s.io/apimachinery/pkg/runtime"
|
||||
//)
|
||||
//
|
||||
//type mockImageManager struct{}
|
||||
//
|
||||
//func (m *mockImageManager) UpdateStatus(ctx context.Context, name, state, message string) error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (m *mockImageManager) Create(ctx context.Context, am *appv1alpha1.ApplicationManager, refs []appv1alpha1.Ref) error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (m *mockImageManager) PollDownloadProgress(ctx context.Context, am *appv1alpha1.ApplicationManager) error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//var _ = Describe("ApplicationManagerController", func() {
|
||||
// var (
|
||||
// ctx context.Context
|
||||
// cancel context.CancelFunc
|
||||
// k8sClient client.Client
|
||||
// controller *ApplicationManagerController
|
||||
// testScheme *runtime.Scheme
|
||||
// mockImgClient *mockImageManager
|
||||
// )
|
||||
// var patchForgetUsername *gomonkey.Patches
|
||||
// var patchForHandleDownloading *gomonkey.Patches
|
||||
// var patchForhandleDownloading *gomonkey.Patches
|
||||
//
|
||||
// var patchForHandleInstalling *gomonkey.Patches
|
||||
// var patchForHandleInitializing *gomonkey.Patches
|
||||
//
|
||||
// BeforeEach(func() {
|
||||
// ctx, cancel = context.WithCancel(context.Background())
|
||||
//
|
||||
// testScheme = runtime.NewScheme()
|
||||
// Expect(scheme.AddToScheme(testScheme)).To(Succeed())
|
||||
// Expect(appv1alpha1.AddToScheme(testScheme)).To(Succeed())
|
||||
// Expect(corev1.AddToScheme(testScheme)).To(Succeed())
|
||||
// Expect(appsv1.AddToScheme(testScheme)).To(Succeed())
|
||||
//
|
||||
// // Create a mock K8s client
|
||||
// k8sClient = fake.NewClientBuilder().WithScheme(testScheme).Build()
|
||||
//
|
||||
// patchForgetUsername = gomonkey.ApplyFunc(kubesphere.GetAdminUsername, func(_ context.Context, _ *rest.Config) (string, error) {
|
||||
// return "admin", nil
|
||||
// })
|
||||
//
|
||||
// // Create a mock ImageManager
|
||||
// mockImgClient = &mockImageManager{}
|
||||
//
|
||||
// // Create the controller
|
||||
// controller = &ApplicationManagerController{
|
||||
// Client: k8sClient,
|
||||
// KubeConfig: &rest.Config{},
|
||||
// ImageClient: mockImgClient,
|
||||
// }
|
||||
// patchForHandleDownloading = gomonkey.ApplyFunc(controller.HandleDownloading, func(_ context.Context, _ *appv1alpha1.ApplicationManager) error {
|
||||
// return nil
|
||||
// })
|
||||
// patchForhandleDownloading = gomonkey.ApplyFunc(controller.handleDownloading, func(_ context.Context, _ *appv1alpha1.ApplicationManager) error {
|
||||
// return nil
|
||||
// })
|
||||
//
|
||||
// patchForHandleInstalling = gomonkey.ApplyFunc(controller.HandleInstalling, func(_ context.Context, _ *appv1alpha1.ApplicationManager) error {
|
||||
// return nil
|
||||
// })
|
||||
// patchForHandleInitializing = gomonkey.ApplyFunc(controller.HandleInitializing, func(_ context.Context, _ *appv1alpha1.ApplicationManager) error {
|
||||
// return nil
|
||||
// })
|
||||
// })
|
||||
//
|
||||
// AfterEach(func() {
|
||||
// cancel()
|
||||
// patchForgetUsername.Reset()
|
||||
// patchForHandleDownloading.Reset()
|
||||
// patchForhandleDownloading.Reset()
|
||||
// patchForHandleInstalling.Reset()
|
||||
// patchForHandleInitializing.Reset()
|
||||
// })
|
||||
//
|
||||
// Context("Reconcile", func() {
|
||||
// It("should handle non-existent ApplicationManager", func() {
|
||||
// req := reconcile.Request{
|
||||
// NamespacedName: types.NamespacedName{
|
||||
// Name: "non-existent",
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// result, err := controller.Reconcile(ctx, req)
|
||||
// Expect(err).NotTo(HaveOccurred())
|
||||
// Expect(result).To(Equal(ctrl.Result{}))
|
||||
// })
|
||||
//
|
||||
// It("should handle ApplicationManager in Pending state", func() {
|
||||
// am := &appv1alpha1.ApplicationManager{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// Spec: appv1alpha1.ApplicationManagerSpec{
|
||||
// AppName: "test-app",
|
||||
// AppNamespace: "default",
|
||||
// AppOwner: "test-owner",
|
||||
// Type: appv1alpha1.App,
|
||||
// Config: `{"chartsName": "../testdata/windows"}`,
|
||||
// },
|
||||
// Status: appv1alpha1.ApplicationManagerStatus{
|
||||
// State: appv1alpha1.Pending,
|
||||
// },
|
||||
// }
|
||||
// Expect(k8sClient.Create(ctx, am)).To(Succeed())
|
||||
//
|
||||
// node := &corev1.Node{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-node",
|
||||
// },
|
||||
// Status: corev1.NodeStatus{
|
||||
// Conditions: []corev1.NodeCondition{
|
||||
// {
|
||||
// Type: corev1.NodeReady,
|
||||
// Status: corev1.ConditionTrue,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
// Expect(k8sClient.Create(ctx, node)).To(Succeed())
|
||||
//
|
||||
// req := reconcile.Request{
|
||||
// NamespacedName: types.NamespacedName{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// result, err := controller.Reconcile(ctx, req)
|
||||
// Expect(err).NotTo(HaveOccurred())
|
||||
// Expect(result).To(Equal(ctrl.Result{}))
|
||||
//
|
||||
// updatedAm := &appv1alpha1.ApplicationManager{}
|
||||
// Expect(k8sClient.Get(ctx, types.NamespacedName{Name: "test-app"}, updatedAm)).To(Succeed())
|
||||
// Expect(updatedAm.Status.State).To(Equal(appv1alpha1.Downloading))
|
||||
// })
|
||||
// })
|
||||
//
|
||||
// Context("handleDownloading", func() {
|
||||
// It("should handle downloading state", func() {
|
||||
// appConfig := &appinstaller.ApplicationConfig{
|
||||
// AppName: "test-app",
|
||||
// Namespace: "default",
|
||||
// OwnerName: "test-owner",
|
||||
// ChartsName: "test-chart",
|
||||
// }
|
||||
// configBytes, err := json.Marshal(appConfig)
|
||||
// Expect(err).NotTo(HaveOccurred())
|
||||
//
|
||||
// am := &appv1alpha1.ApplicationManager{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// Spec: appv1alpha1.ApplicationManagerSpec{
|
||||
// AppName: "test-app",
|
||||
// AppNamespace: "default",
|
||||
// AppOwner: "test-owner",
|
||||
// Type: appv1alpha1.App,
|
||||
// Config: string(configBytes),
|
||||
// },
|
||||
// Status: appv1alpha1.ApplicationManagerStatus{
|
||||
// State: appv1alpha1.Downloading,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// node := &corev1.Node{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-node",
|
||||
// },
|
||||
// Status: corev1.NodeStatus{
|
||||
// Conditions: []corev1.NodeCondition{
|
||||
// {
|
||||
// Type: corev1.NodeReady,
|
||||
// Status: corev1.ConditionTrue,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
// Expect(k8sClient.Create(ctx, node)).To(Succeed())
|
||||
//
|
||||
// im := &appv1alpha1.ImageManager{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// Spec: appv1alpha1.ImageManagerSpec{
|
||||
// AppName: "test-app",
|
||||
// AppNamespace: "default",
|
||||
// AppOwner: "test-owner",
|
||||
// Nodes: []string{"test-node"},
|
||||
// },
|
||||
// Status: appv1alpha1.ImageManagerStatus{
|
||||
// State: "completed",
|
||||
// Message: "success",
|
||||
// Conditions: map[string]map[string]map[string]string{},
|
||||
// },
|
||||
// }
|
||||
// Expect(k8sClient.Create(ctx, im)).To(Succeed())
|
||||
//
|
||||
// err = controller.HandleDownloading(ctx, am)
|
||||
// if err != nil {
|
||||
// Expect(err.Error()).To(ContainSubstring("failed to get image refs"))
|
||||
// }
|
||||
//
|
||||
// Expect(am.Status.State).To(Equal(appv1alpha1.Installing))
|
||||
// })
|
||||
// })
|
||||
//
|
||||
// Context("handleInstalling", func() {
|
||||
// It("should handle installing state", func() {
|
||||
// appConfig := &appinstaller.ApplicationConfig{
|
||||
// AppName: "test-app",
|
||||
// Namespace: "default",
|
||||
// OwnerName: "test-owner",
|
||||
// ChartsName: "test-chart",
|
||||
// }
|
||||
// configBytes, err := json.Marshal(appConfig)
|
||||
// Expect(err).NotTo(HaveOccurred())
|
||||
//
|
||||
// am := &appv1alpha1.ApplicationManager{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// Spec: appv1alpha1.ApplicationManagerSpec{
|
||||
// AppName: "test-app",
|
||||
// AppNamespace: "default",
|
||||
// AppOwner: "test-owner",
|
||||
// Type: appv1alpha1.App,
|
||||
// Config: string(configBytes),
|
||||
// },
|
||||
// Status: appv1alpha1.ApplicationManagerStatus{
|
||||
// State: appv1alpha1.Installing,
|
||||
// Payload: map[string]string{
|
||||
// "token": "test-token",
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// err = controller.HandleInstalling(ctx, am)
|
||||
// if err != nil {
|
||||
// Expect(err.Error()).To(ContainSubstring("failed to create helm ops"))
|
||||
// }
|
||||
//
|
||||
//
|
||||
// Expect(am.Status.State).To(Equal(appv1alpha1.Initializing))
|
||||
// })
|
||||
// })
|
||||
//
|
||||
// Context("handleInitializing", func() {
|
||||
// It("should handle initializing state", func() {
|
||||
// appConfig := &appinstaller.ApplicationConfig{
|
||||
// AppName: "test-app",
|
||||
// Namespace: "default",
|
||||
// OwnerName: "test-owner",
|
||||
// ChartsName: "test-chart",
|
||||
// }
|
||||
// configBytes, err := json.Marshal(appConfig)
|
||||
// Expect(err).NotTo(HaveOccurred())
|
||||
//
|
||||
// am := &appv1alpha1.ApplicationManager{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// Spec: appv1alpha1.ApplicationManagerSpec{
|
||||
// AppName: "test-app",
|
||||
// AppNamespace: "default",
|
||||
// AppOwner: "test-owner",
|
||||
// Type: appv1alpha1.App,
|
||||
// Config: string(configBytes),
|
||||
// },
|
||||
// Status: appv1alpha1.ApplicationManagerStatus{
|
||||
// State: appv1alpha1.Initializing,
|
||||
// Payload: map[string]string{
|
||||
// "token": "test-token",
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// err = controller.HandleInitializing(ctx, am)
|
||||
// if err != nil {
|
||||
// Expect(err.Error()).To(ContainSubstring("failed to create helm ops"))
|
||||
// }
|
||||
//
|
||||
// Expect(am.Status.State).To(Equal(appv1alpha1.Running))
|
||||
// })
|
||||
// })
|
||||
//
|
||||
// Context("handleUninstalling", func() {
|
||||
// It("should handle uninstalling state", func() {
|
||||
// am := &appv1alpha1.ApplicationManager{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "test-app",
|
||||
// },
|
||||
// Spec: appv1alpha1.ApplicationManagerSpec{
|
||||
// AppName: "test-app",
|
||||
// AppNamespace: "default",
|
||||
// AppOwner: "test-owner",
|
||||
// Type: appv1alpha1.App,
|
||||
// },
|
||||
// Status: appv1alpha1.ApplicationManagerStatus{
|
||||
// State: appv1alpha1.Uninstalling,
|
||||
// Payload: map[string]string{
|
||||
// "token": "test-token",
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// err := controller.HandleUninstalling(ctx, am)
|
||||
// if err != nil {
|
||||
// Expect(err.Error()).To(ContainSubstring("failed to create helm ops"))
|
||||
// }
|
||||
//
|
||||
// Expect(am.Status.State).To(Equal(appv1alpha1.Uninstalled))
|
||||
// })
|
||||
// })
|
||||
//})
|
||||
396
framework/app-service/controllers/entrancestatus_controller.go
Normal file
396
framework/app-service/controllers/entrancestatus_controller.go
Normal file
|
|
@ -0,0 +1,396 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
appevent "bytetrade.io/web3os/app-service/pkg/event"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// Kubernetes workload kind names used when walking pod owner references.
const (
	deployment  = "Deployment"
	statefulSet = "StatefulSet"
	replicaSet  = "ReplicaSet"
)
|
||||
|
||||
// ReasonedMessage pairs a short status keyword with an optional
// human-readable explanation for an entrance's current condition.
type ReasonedMessage struct {
	// Reason is a brief status keyword (e.g. a pod phase or waiting reason).
	Reason string
	// Message carries the detailed explanation, if any.
	Message string
}
|
||||
|
||||
// EntranceStatusManagerController manages the status of app entrance
// by watching pods and syncing each Application's EntranceStatuses.
type EntranceStatusManagerController struct {
	client.Client
}
|
||||
|
||||
// SetUpWithManager registers the entrance-status-manager controller with mgr
// and wires a pod watch: all pod creations are enqueued, updates are filtered
// through preEnqueueCheckForUpdate (olares app pods only), and deletions are
// ignored.
func (r *EntranceStatusManagerController) SetUpWithManager(mgr ctrl.Manager) error {
	c, err := controller.New("entrance-status-manager-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}
	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&corev1.Pod{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, pod *corev1.Pod) []reconcile.Request {
				// Map each pod event to a request keyed by the pod itself.
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name:      pod.Name,
					Namespace: pod.Namespace,
				}}}
			}),
		predicate.TypedFuncs[*corev1.Pod]{
			CreateFunc: func(e event.TypedCreateEvent[*corev1.Pod]) bool {
				return true
			},
			UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Pod]) bool {
				return r.preEnqueueCheckForUpdate(e.ObjectOld, e.ObjectNew)
			},
			DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Pod]) bool {
				return false
			},
		},
	))
	if err != nil {
		klog.Errorf("entrance-status-manager-controller failed to watch err=%v", err)
		return err
	}
	return nil
}
|
||||
|
||||
func (r *EntranceStatusManagerController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = log.FromContext(ctx)
|
||||
klog.Infof("reconcile entrance-status-manager request name=%v", req.Name)
|
||||
var pod corev1.Pod
|
||||
err := r.Get(ctx, req.NamespacedName, &pod)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
err = r.updateEntranceStatus(ctx, &pod)
|
||||
if err != nil {
|
||||
klog.Errorf("update entrance status err=%v", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *EntranceStatusManagerController) preEnqueueCheckForUpdate(_, new client.Object) bool {
|
||||
pod, _ := new.(*corev1.Pod)
|
||||
if _, ok := pod.Labels["io.bytetrade.app"]; ok {
|
||||
klog.Infof("Pod.Name=%v, olares-app=%v", pod.Name, pod.Labels["io.bytetrade.app"])
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *EntranceStatusManagerController) getStsOrDeploymentReplicasByPod(ctx context.Context, pod *corev1.Pod) (replicas int32, labelSelector *metav1.LabelSelector, err error) {
|
||||
replicas = 1
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
return replicas, nil, nil
|
||||
}
|
||||
var kind, name string
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
switch ownerRef.Kind {
|
||||
case replicaSet:
|
||||
key := types.NamespacedName{Namespace: pod.Namespace, Name: ownerRef.Name}
|
||||
var rs appsv1.ReplicaSet
|
||||
err = r.Get(ctx, key, &rs)
|
||||
if err != nil {
|
||||
return replicas, nil, err
|
||||
}
|
||||
if len(rs.OwnerReferences) > 0 && rs.OwnerReferences[0].Kind == deployment {
|
||||
kind = deployment
|
||||
name = rs.OwnerReferences[0].Name
|
||||
}
|
||||
case statefulSet:
|
||||
kind = statefulSet
|
||||
name = ownerRef.Name
|
||||
}
|
||||
if kind == "" {
|
||||
return replicas, nil, nil
|
||||
}
|
||||
switch kind {
|
||||
case deployment:
|
||||
var deploy appsv1.Deployment
|
||||
key := types.NamespacedName{Name: name, Namespace: pod.Namespace}
|
||||
err = r.Get(ctx, key, &deploy)
|
||||
if err != nil {
|
||||
return replicas, nil, err
|
||||
}
|
||||
deployCopy := deploy.DeepCopy()
|
||||
labelSelector = deploy.Spec.Selector
|
||||
replicas = *deployCopy.Spec.Replicas
|
||||
case statefulSet:
|
||||
var sts appsv1.StatefulSet
|
||||
key := types.NamespacedName{Name: name, Namespace: pod.Namespace}
|
||||
err = r.Get(ctx, key, &sts)
|
||||
if err != nil {
|
||||
return replicas, nil, err
|
||||
}
|
||||
stsCopy := sts.DeepCopy()
|
||||
labelSelector = sts.Spec.Selector
|
||||
replicas = *stsCopy.Spec.Replicas
|
||||
|
||||
}
|
||||
return replicas, labelSelector, nil
|
||||
}
|
||||
|
||||
// updateEntranceStatus propagates pod's readiness into the EntranceStatuses
// of every Application in the pod's namespace whose entrance service selects
// this pod, then publishes an app event carrying the refreshed statuses.
func (r *EntranceStatusManagerController) updateEntranceStatus(ctx context.Context, pod *corev1.Pod) error {
	namespace := pod.Namespace
	var apps v1alpha1.ApplicationList
	err := r.List(ctx, &apps)
	if err != nil {
		return err
	}
	// appInfo records one (application, entrance) pair whose entrance service
	// selects this pod.
	type appInfo struct {
		name         string
		startedTime  *metav1.Time
		entranceName string
	}
	filteredApp := make([]appInfo, 0)

	for _, a := range apps.Items {
		if a.Spec.Namespace != namespace {
			continue
		}
		for _, e := range a.Spec.Entrances {
			// skip entrances explicitly marked to be ignored
			if e.Skip {
				continue
			}
			// e.Host names the entrance's service; NotFound just means the
			// service does not (yet) exist, so it is not treated as fatal.
			isSelected, err := r.isEntrancePod(ctx, pod, e.Host, namespace)
			if err != nil && !apierrors.IsNotFound(err) {
				return err
			}
			if isSelected {
				filteredApp = append(filteredApp, appInfo{
					name:         a.Name,
					startedTime:  a.Status.StartedTime,
					entranceName: e.Name,
				})

			}
		}
	}
	// Group matched entrance names by application so each app is patched once.
	appEntranceMap := make(map[string][]string)
	for _, a := range filteredApp {
		appEntranceMap[a.name] = append(appEntranceMap[a.name], a.entranceName)
	}

	for appName, entranceNames := range appEntranceMap {
		var selectedApp v1alpha1.Application
		err = r.Get(ctx, types.NamespacedName{Name: appName}, &selectedApp)
		if err != nil {
			return err
		}
		appCopy := selectedApp.DeepCopy()
		// One state calculation per app; all its matched entrances get it.
		entranceState, rm, err := r.calEntranceState(ctx, pod)
		if err != nil {
			klog.Errorf("failed to cal entrance state %v", err)
			return err
		}
		for _, entranceName := range entranceNames {
			for i := len(appCopy.Status.EntranceStatuses) - 1; i >= 0; i-- {
				if appCopy.Status.EntranceStatuses[i].Name == entranceName {

					appCopy.Status.EntranceStatuses[i].State = entranceState
					appCopy.Status.EntranceStatuses[i].Reason = rm.Reason
					appCopy.Status.EntranceStatuses[i].Message = rm.Message
					now := metav1.Now()
					appCopy.Status.EntranceStatuses[i].StatusTime = &now
				}
			}
		}
		patchApp := client.MergeFrom(&selectedApp)
		err = r.Status().Patch(ctx, appCopy, patchApp)
		klog.Infof("updateEntrances ...:name: %v", appCopy.Name)

		if err != nil {
			klog.Errorf("failed to patch err=%v", err)
			return err
		}
		// The ApplicationManager may legitimately be absent; only a
		// non-NotFound Get error aborts. The event is published only when the
		// manager was found (err == nil).
		var am v1alpha1.ApplicationManager
		err = r.Get(ctx, types.NamespacedName{Name: selectedApp.Name}, &am)
		if err != nil && !apierrors.IsNotFound(err) {
			klog.Errorf("failed to get am name=%s, err=%v", selectedApp.Name, err)
			return err
		}
		if err == nil {
			klog.Infof("pushevent app Reason: %s", am.Status.Reason)
			appevent.PublishAppEventToQueue(utils.EventParams{
				Owner:            appCopy.Spec.Owner,
				Name:             appCopy.Spec.Name,
				OpType:           "",
				OpID:             "",
				State:            am.Status.State.String(),
				EntranceStatuses: appCopy.Status.EntranceStatuses,
				RawAppName:       appCopy.Spec.RawAppName,
				Type:             "app",
				Title:            app.AppTitle(am.Spec.Config),
			})
		}
	}
	return nil
}
|
||||
|
||||
func (r *EntranceStatusManagerController) isEntrancePod(ctx context.Context, pod *corev1.Pod, svcName, namespace string) (bool, error) {
|
||||
var svc corev1.Service
|
||||
key := types.NamespacedName{Namespace: namespace, Name: svcName}
|
||||
err := r.Get(ctx, key, &svc)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
selector, err := labels.ValidatedSelectorFromSet(svc.Spec.Selector)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
isSelected := selector.Matches(labels.Set(pod.GetLabels()))
|
||||
return isSelected, nil
|
||||
}
|
||||
|
||||
// calEntranceState derives the entrance state for the workload backing pod.
//
// If the owning Deployment/StatefulSet is scaled to zero replicas the
// entrance is reported as stopped. Without a recognized owner, the single pod
// is judged directly. Otherwise all sibling pods of the workload are
// inspected and the entrance is Running as soon as any one of them is fully
// ready; the last inspected pod's state/reason/message are reported
// otherwise.
func (r *EntranceStatusManagerController) calEntranceState(ctx context.Context, pod *corev1.Pod) (v1alpha1.EntranceState, ReasonedMessage, error) {
	var message string
	reason := string(pod.Status.Phase)

	replicas, labelSelector, err := r.getStsOrDeploymentReplicasByPod(ctx, pod)
	if err != nil {
		klog.Error("get sts or deployment replicas error, ", err, ", ", pod.Namespace, "/", pod.Name)
		return "", ReasonedMessage{Reason: reason}, err
	}
	if replicas == 0 {
		// Workload deliberately scaled to zero.
		reason = "stopped"
		return v1alpha1.EntranceStopped, ReasonedMessage{
			Reason: reason,
		}, nil
	}
	var state v1alpha1.EntranceState
	if labelSelector == nil {
		// No scalable owner: judge by this pod alone.
		state, reason, message = makeEntranceState(pod)
		return state, ReasonedMessage{Reason: reason, Message: message}, nil
	}

	var podList corev1.PodList
	err = r.List(ctx, &podList, client.InNamespace(pod.Namespace), client.MatchingLabels(labelSelector.MatchLabels))
	if err != nil {
		klog.Error("failed to list pods, err=", err, ", ", pod.Namespace, ", ", labelSelector.MatchLabels)
		return state, ReasonedMessage{}, err
	}

	// Any fully-ready replica makes the entrance Running.
	for _, pod := range podList.Items {
		state, reason, message = makeEntranceState(&pod)
		if state == v1alpha1.EntranceRunning {
			return state, ReasonedMessage{
				Reason:  reason,
				Message: message,
			}, nil
		}
	}
	return state, ReasonedMessage{
		Reason:  reason,
		Message: message,
	}, nil
}
|
||||
|
||||
// makeEntranceState classifies a single pod into an entrance state, returning
// (state, reason, message). The shape of the logic mirrors kubectl's pod
// status rendering: init containers are examined first, then the pod counts
// as Running only when every app container is ready and running.
func makeEntranceState(pod *corev1.Pod) (v1alpha1.EntranceState, string, string) {
	var reason, message string
	reason = string(pod.Status.Phase)
	if pod.Status.Reason != "" {
		reason = pod.Status.Reason
	}
	if pod.Status.Message != "" {
		message = pod.Status.Message
	}
	// Walk init containers; the first non-successful one determines the
	// reason (the trailing break stops after it).
	initializing := false
	for i := range pod.Status.InitContainerStatuses {
		container := pod.Status.InitContainerStatuses[i]
		switch {
		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
			// This init container finished successfully; check the next one.
			continue
		case container.State.Terminated != nil:
			// Init container failed; surface its termination reason.
			if len(container.State.Terminated.Reason) == 0 {
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
				}
			} else {
				reason = "Init:" + container.State.Terminated.Reason
			}
			if container.State.Terminated.Message != "" {
				message = container.State.Terminated.Message
			}
			initializing = true
		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
			reason = "Init:" + container.State.Waiting.Reason
			if container.State.Waiting.Message != "" {
				message = container.State.Waiting.Message
			}
			initializing = true
		default:
			// Still progressing through init containers: report i/total.
			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
			initializing = true
		}
		break
	}
	totalContainers := len(pod.Spec.Containers)
	readyContainers := 0

	if !initializing {
		// Scan app containers last-to-first, keeping the most relevant
		// waiting/terminated reason while counting ready running containers.
		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
			container := pod.Status.ContainerStatuses[i]

			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
				reason = container.State.Waiting.Reason
				if container.State.Waiting.Message != "" {
					message = container.State.Waiting.Message
				}
			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
				reason = container.State.Terminated.Reason
				if container.State.Terminated.Message != "" {
					message = container.State.Terminated.Message
				}
			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
				}
				if container.State.Terminated.Message != "" {
					message = container.State.Terminated.Message
				}
			} else if container.Ready && container.State.Running != nil {
				readyContainers++
			}
		}
	}
	// Running only when every container is ready (and there is at least one).
	if readyContainers == totalContainers && readyContainers != 0 {
		return v1alpha1.EntranceRunning, reason, message
	}

	return v1alpha1.EntranceNotReady, reason, message
}
|
||||
187
framework/app-service/controllers/eviction_controller.go
Normal file
187
framework/app-service/controllers/eviction_controller.go
Normal file
|
|
@ -0,0 +1,187 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/security"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// EvictionManagerController watches for evicted pods and, when the cluster
// has no alternative schedulable node, scales the owning workload to zero and
// deletes the evicted pod so it does not linger.
// (Previous comment was a copy-paste from the entrance-status controller.)
type EvictionManagerController struct {
	client.Client
}
|
||||
|
||||
func (r *EvictionManagerController) SetUpWithManager(mgr ctrl.Manager) error {
|
||||
c, err := controller.New("eviction-manager-controller", mgr, controller.Options{
|
||||
Reconciler: r,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.Watch(source.Kind(
|
||||
mgr.GetCache(),
|
||||
&corev1.Pod{},
|
||||
handler.TypedEnqueueRequestsFromMapFunc(
|
||||
func(ctx context.Context, pod *corev1.Pod) []reconcile.Request {
|
||||
return []reconcile.Request{{NamespacedName: types.NamespacedName{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
}}}
|
||||
}),
|
||||
predicate.TypedFuncs[*corev1.Pod]{
|
||||
CreateFunc: func(e event.TypedCreateEvent[*corev1.Pod]) bool {
|
||||
return true
|
||||
},
|
||||
UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Pod]) bool {
|
||||
return true
|
||||
},
|
||||
DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Pod]) bool {
|
||||
return false
|
||||
},
|
||||
},
|
||||
))
|
||||
if err != nil {
|
||||
klog.Errorf("entrance-status-manager-controller failed to watch err=%v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *EvictionManagerController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = log.FromContext(ctx)
|
||||
klog.Infof("reconcile entrance-status-manager request name=%v", req.Name)
|
||||
var pod corev1.Pod
|
||||
err := r.Get(ctx, req.NamespacedName, &pod)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
nss := append(security.UnderLayerNamespaces, security.GPUSystemNamespaces...)
|
||||
ignoredNs := sets.NewString(nss...)
|
||||
|
||||
podName := pod.GetName()
|
||||
podNamespace := pod.GetNamespace()
|
||||
|
||||
if podNamespace == "" || ignoredNs.Has(podNamespace) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if pod.Status.Reason != "Evicted" {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
klog.Infof("pod.Name=%s, pod.Namespace=%s,pod.Status.Reason=%s,message=%s", podName, podNamespace, pod.Status.Reason, pod.Status.Message)
|
||||
|
||||
var nodes corev1.NodeList
|
||||
err = r.List(ctx, &nodes, &client.ListOptions{})
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
canScheduleNodes := 0
|
||||
for _, node := range nodes.Items {
|
||||
if utils.IsNodeReady(&node) && !node.Spec.Unschedulable {
|
||||
canScheduleNodes++
|
||||
}
|
||||
}
|
||||
if canScheduleNodes > 1 {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
appName := pod.GetLabels()[constants.ApplicationNameLabel]
|
||||
owner := pod.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
if appName != "" || owner != "" {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
_, err = r.setDeployOrStsReplicas(ctx, podName, podNamespace, int32(0))
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
err = r.Delete(ctx, &pod)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *EvictionManagerController) setDeployOrStsReplicas(ctx context.Context, podName, namespace string, replicas int32) (bool, error) {
|
||||
var pod corev1.Pod
|
||||
key := types.NamespacedName{Name: podName, Namespace: namespace}
|
||||
err := r.Get(ctx, key, &pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
var kind, name string
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
switch ownerRef.Kind {
|
||||
case "ReplicaSet":
|
||||
key = types.NamespacedName{Namespace: namespace, Name: ownerRef.Name}
|
||||
var rs appsv1.ReplicaSet
|
||||
err = r.Get(ctx, key, &rs)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(rs.OwnerReferences) > 0 && rs.OwnerReferences[0].Kind == deployment {
|
||||
kind = deployment
|
||||
name = rs.OwnerReferences[0].Name
|
||||
}
|
||||
case statefulSet:
|
||||
kind = statefulSet
|
||||
name = ownerRef.Name
|
||||
}
|
||||
if kind == "" {
|
||||
return true, nil
|
||||
}
|
||||
switch kind {
|
||||
case deployment:
|
||||
var deploy appsv1.Deployment
|
||||
key = types.NamespacedName{Name: name, Namespace: namespace}
|
||||
err = r.Get(ctx, key, &deploy)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
deployCopy := deploy.DeepCopy()
|
||||
deployCopy.Spec.Replicas = &replicas
|
||||
|
||||
err = r.Patch(ctx, deployCopy, client.MergeFrom(&deploy))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
case statefulSet:
|
||||
var sts appsv1.StatefulSet
|
||||
key = types.NamespacedName{Name: name, Namespace: namespace}
|
||||
err = r.Get(ctx, key, &sts)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
stsCopy := sts.DeepCopy()
|
||||
stsCopy.Spec.Replicas = &replicas
|
||||
|
||||
err = r.Patch(ctx, stsCopy, client.MergeFrom(&sts))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
303
framework/app-service/controllers/image_controller.go
Normal file
303
framework/app-service/controllers/image_controller.go
Normal file
|
|
@ -0,0 +1,303 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/images"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// thisNode is the name of the Kubernetes node this controller instance runs
// on, read once at startup from the NODE_NAME environment variable.
var thisNode string

func init() {
	// NOTE(review): if NODE_NAME is unset this silently stays "", which makes
	// the node-matching logic in updateStatus a no-op — confirm the env var
	// is always injected (e.g. via the downward API in the pod spec).
	thisNode = os.Getenv("NODE_NAME")
}
|
||||
|
||||
// ImageManagerController reconciles ImageManager resources: it drives image
// downloads for an application and records progress in the resource status.
type ImageManagerController struct {
	client.Client
}
|
||||
|
||||
// SetupWithManager registers the image controller with the provided manager
// and wires a typed watch on ImageManager objects, filtered by the
// pre-enqueue checks below.
func (r *ImageManagerController) SetupWithManager(mgr ctrl.Manager) error {
	c, err := controller.New("image-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}

	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&appv1alpha1.ImageManager{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, app *appv1alpha1.ImageManager) []reconcile.Request {
				// NOTE(review): Namespace is filled with the app owner here,
				// but Reconcile only uses the name — confirm the namespace
				// field is intentional.
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name:      app.Name,
					Namespace: app.Spec.AppOwner,
				}}}
			}),
		predicate.TypedFuncs[*appv1alpha1.ImageManager]{
			CreateFunc: func(e event.TypedCreateEvent[*appv1alpha1.ImageManager]) bool {
				return r.preEnqueueCheckForCreate(e.Object)
			},
			UpdateFunc: func(e event.TypedUpdateEvent[*appv1alpha1.ImageManager]) bool {
				return r.preEnqueueCheckForUpdate(e.ObjectOld, e.ObjectNew)
			},
			// Deletions require no reconciliation.
			DeleteFunc: func(e event.TypedDeleteEvent[*appv1alpha1.ImageManager]) bool {
				return false
			},
		},
	))

	if err != nil {
		klog.Errorf("Failed to add watch err=%v", err)
		return err
	}

	return nil
}
|
||||
|
||||
// imageManager maps an ImageManager name to the cancel function of its
// in-flight download, letting a DownloadingCanceled update abort the pull.
// NOTE(review): it is written in reconcile and read from a separate goroutine
// in cancel without synchronization — potential data race; verify the
// controller's concurrency settings make this safe.
var imageManager map[string]context.CancelFunc
|
||||
|
||||
// Reconcile implements the reconciliation loop for the ImageManagerController
|
||||
func (r *ImageManagerController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = log.FromContext(ctx)
|
||||
ctrl.Log.Info("reconcile image manager request", "name", req.Name)
|
||||
|
||||
var im appv1alpha1.ImageManager
|
||||
err := r.Get(ctx, req.NamespacedName, &im)
|
||||
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "get application manager error", "name", req.Name)
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
// unexpected error, retry after 5s
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
err = r.reconcile(ctx, &im)
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "download image error", "name", req.Name)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
// reconcile performs one download pass for the given ImageManager: it marks
// the resource Downloading, pulls every image ref, and records the final
// state (completed / failed / canceled).
//
// The context cancel function is registered in the package-level imageManager
// map so a later DownloadingCanceled update can abort the pull; the deferred
// delete removes the registration when this pass ends.
func (r *ImageManagerController) reconcile(ctx context.Context, instance *appv1alpha1.ImageManager) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	defer func() {
		// Deregister this download when the pass finishes, success or not.
		delete(imageManager, instance.Name)
	}()
	var err error

	// Re-read the object to act on its latest spec/status.
	var cur appv1alpha1.ImageManager
	err = r.Get(ctx, types.NamespacedName{Name: instance.Name}, &cur)
	if err != nil {
		klog.Errorf("Failed to get imagemanagers name=%s err=%v", instance.Name, err)
		return err
	}

	if imageManager == nil {
		imageManager = make(map[string]context.CancelFunc)
	}
	// An existing registration means a download for this name is already
	// running; do not start a second one.
	if _, ok := imageManager[instance.Name]; ok {
		return nil
	}
	imageManager[instance.Name] = cancel
	if cur.Status.State != appv1alpha1.Downloading.String() {
		err = r.updateStatus(ctx, &cur, appv1alpha1.Downloading.String(), "start downloading")
		if err != nil {
			klog.Infof("Failed to update imagemanager status name=%v, err=%v", cur.Name, err)
			return err
		}
	}

	err = r.download(ctx, cur.Spec.Refs,
		images.PullOptions{
			AppName:      instance.Spec.AppName,
			OwnerName:    instance.Spec.AppOwner,
			AppNamespace: instance.Spec.AppNamespace,
		})
	if err != nil {
		klog.Infof("download failed err=%v", err)

		// A canceled context is reported as DownloadingCanceled rather than
		// a plain failure.
		state := "failed"
		if errors.Is(err, context.Canceled) {
			state = appv1alpha1.DownloadingCanceled.String()
		}
		// context.TODO() because the pass context may already be canceled.
		// NOTE(review): err is reassigned here, so a successful status write
		// returns nil and the download error is not requeued — confirm that
		// is intentional.
		err = r.updateStatus(context.TODO(), instance, state, err.Error())
		if err != nil {
			klog.Infof("Failed to update status err=%v", err)
		}
		return err
	}
	// NOTE(review): brief settle delay before the final status write — the
	// reason is not evident from this file; confirm it is still needed.
	time.Sleep(2 * time.Second)
	err = r.updateStatus(context.TODO(), instance, "completed", "image download completed")
	if err != nil {
		klog.Infof("Failed to update status err=%v", err)
		return err
	}

	klog.Infof("download app: %s image success", instance.Spec.AppName)
	return nil
}
|
||||
|
||||
func (r *ImageManagerController) preEnqueueCheckForCreate(obj client.Object) bool {
|
||||
im, _ := obj.(*appv1alpha1.ImageManager)
|
||||
if im.Status.State == "failed" || im.Status.State == appv1alpha1.DownloadingCanceled.String() ||
|
||||
im.Status.State == "completed" {
|
||||
return false
|
||||
}
|
||||
klog.Infof("enqueue check: %v", im.Status.State)
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *ImageManagerController) preEnqueueCheckForUpdate(old, new client.Object) bool {
|
||||
im, _ := new.(*appv1alpha1.ImageManager)
|
||||
if im.Status.State == appv1alpha1.DownloadingCanceled.String() {
|
||||
go r.cancel(im)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *ImageManagerController) download(ctx context.Context, refs []appv1alpha1.Ref, opts images.PullOptions) (err error) {
|
||||
if len(refs) == 0 {
|
||||
return errors.New("no image to download")
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
var errs error
|
||||
tokens := make(chan struct{}, 3)
|
||||
for _, ref := range refs {
|
||||
wg.Add(1)
|
||||
go func(ref appv1alpha1.Ref) {
|
||||
tokens <- struct{}{}
|
||||
defer wg.Done()
|
||||
defer func() {
|
||||
<-tokens
|
||||
}()
|
||||
iClient, ctx, cancel, err := images.NewClient(ctx)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
return
|
||||
}
|
||||
defer cancel()
|
||||
_, err = iClient.PullImage(ctx, ref, opts)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
klog.Infof("pull image failed name=%v err=%v", ref, err)
|
||||
}
|
||||
}(ref)
|
||||
}
|
||||
klog.Infof("waiting image %v to download", refs)
|
||||
wg.Wait()
|
||||
return errs
|
||||
}
|
||||
|
||||
// updateStatus re-reads the ImageManager and patches its status with the
// given state and message under a conflict-retry loop.
//
// The "completed" state is handled specially: it is only committed once every
// node listed in spec reports full download progress (offset == total) for
// every ref; this node's own conditions are seeded first.
func (r *ImageManagerController) updateStatus(ctx context.Context, im *appv1alpha1.ImageManager, state string, message string) error {
	var err error
	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Refresh im so the patch base is current; note this mutates the
		// caller's object.
		err = r.Get(ctx, types.NamespacedName{Name: im.Name}, im)
		if err != nil {
			return err
		}

		now := metav1.Now()
		imCopy := im.DeepCopy()
		// Non-terminal states are written directly; "completed" waits for
		// the all-nodes check below.
		if state != "completed" {
			imCopy.Status.State = state
		}
		imCopy.Status.Message = message
		imCopy.Status.StatusTime = &now
		imCopy.Status.UpdateTime = &now

		if state == "completed" {
			// Seed this node's per-ref progress conditions if missing.
			for _, node := range imCopy.Spec.Nodes {
				if node != thisNode {
					continue
				}
				if imCopy.Status.Conditions == nil {
					imCopy.Status.Conditions = make(map[string]map[string]map[string]string)
				}
				if imCopy.Status.Conditions[thisNode] == nil {
					imCopy.Status.Conditions[thisNode] = make(map[string]map[string]string)
				}
				for _, ref := range imCopy.Spec.Refs {
					if _, ok := imCopy.Status.Conditions[thisNode][ref.Name]; !ok {
						// NOTE(review): placeholder byte counts marking the
						// ref fully downloaded (offset == total) — confirm
						// consumers only compare the two values, never use
						// them as real sizes.
						imCopy.Status.Conditions[thisNode][ref.Name] = map[string]string{
							"offset": "56782302",
							"total":  "56782302",
						}
					}
				}
			}

			// checkAllCompleted reports whether every node has offset ==
			// total recorded for every ref.
			checkAllCompleted := func() bool {
				for _, node := range imCopy.Spec.Nodes {
					conditionsNode, ok := imCopy.Status.Conditions[node]
					if !ok {
						return false
					}
					for _, ref := range imCopy.Spec.Refs {
						if _, ok := conditionsNode[ref.Name]["offset"]; !ok {
							return false
						}
						if _, ok := conditionsNode[ref.Name]["total"]; !ok {
							return false
						}
						if conditionsNode[ref.Name]["offset"] != conditionsNode[ref.Name]["total"] {
							return false
						}
					}
				}
				return true
			}
			if checkAllCompleted() {
				// NOTE(review): debug trace logged at error level — likely
				// leftover; confirm before removing.
				klog.Errorf("checkallcompleted............")
				imCopy.Status.State = state
			}

		}

		err = r.Status().Patch(ctx, imCopy, client.MergeFrom(im))
		if err != nil {
			klog.Errorf("failed to patch im %s status with state=%s %v", imCopy.Name, imCopy.Status.State, err)
			return err
		}
		return nil
	})

	return err
}
|
||||
|
||||
func (r *ImageManagerController) cancel(im *appv1alpha1.ImageManager) error {
|
||||
cancel, ok := imageManager[im.Name]
|
||||
if !ok {
|
||||
return errors.New("can not execute cancel")
|
||||
}
|
||||
cancel()
|
||||
return nil
|
||||
}
|
||||
428
framework/app-service/controllers/image_info_controller.go
Normal file
428
framework/app-service/controllers/image_info_controller.go
Normal file
|
|
@ -0,0 +1,428 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils/registry"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
refdocker "github.com/containerd/containerd/reference/docker"
|
||||
"github.com/containers/image/v5/image"
|
||||
"github.com/containers/image/v5/transports/alltransports"
|
||||
imagetypes "github.com/containers/image/v5/types"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// AppImageInfoController reconciles AppImage resources: it inspects each
// image ref's manifest and records per-node layer information in status.
type AppImageInfoController struct {
	client.Client
	// imageClient talks to the local containerd socket to check which layers
	// already exist in the content store.
	imageClient *containerd.Client
}
|
||||
|
||||
// SetupWithManager registers the app-image controller with the provided
// manager, lazily connects to the local containerd socket, and wires a typed
// watch on AppImage objects.
func (r *AppImageInfoController) SetupWithManager(mgr ctrl.Manager) error {
	c, err := controller.New("app-image-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		klog.Errorf("set up app-image-controller failed %v", err)
		return err
	}
	if r.imageClient == nil {
		// Shared containerd client in the k8s.io namespace, used later to
		// query local layer download progress.
		r.imageClient, err = containerd.New("/var/run/containerd/containerd.sock", containerd.WithDefaultNamespace("k8s.io"),
			containerd.WithTimeout(10*time.Second))
		if err != nil {
			klog.Errorf("create image client failed %v", err)
			return err
		}
	}

	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&appv1alpha1.AppImage{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, app *appv1alpha1.AppImage) []reconcile.Request {
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name: app.Name,
				}}}
			}),
		predicate.TypedFuncs[*appv1alpha1.AppImage]{
			CreateFunc: func(e event.TypedCreateEvent[*appv1alpha1.AppImage]) bool {
				return r.preEnqueueCheckForCreate(e.Object)
			},
			UpdateFunc: func(e event.TypedUpdateEvent[*appv1alpha1.AppImage]) bool {
				return r.preEnqueueCheckForUpdate(e.ObjectOld, e.ObjectNew)
			},
			// Deletions require no reconciliation.
			DeleteFunc: func(e event.TypedDeleteEvent[*appv1alpha1.AppImage]) bool {
				return false
			},
		},
	))

	if err != nil {
		klog.Errorf("Failed to add watch err=%v", err)
		return err
	}

	return nil
}
|
||||
|
||||
// Reconcile implements the reconciliation loop for the ImageManagerController
|
||||
func (r *AppImageInfoController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = log.FromContext(ctx)
|
||||
ctrl.Log.Info("reconcile app image request", "name", req.Name)
|
||||
|
||||
var am appv1alpha1.AppImage
|
||||
err := r.Get(ctx, req.NamespacedName, &am)
|
||||
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "get app image error", "name", req.Name)
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
// unexpected error, retry after 5s
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
err = r.reconcile(ctx, &am)
|
||||
if err != nil {
|
||||
ctrl.Log.Error(err, "get app image info error", "name", req.Name)
|
||||
}
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
// reconcile collects image manifest/layer information for the AppImage and
// writes it into status, marking the resource completed or failed.
func (r *AppImageInfoController) reconcile(ctx context.Context, instance *appv1alpha1.AppImage) error {
	// A fresh detached context caps the whole inspection at 5 minutes.
	// NOTE(review): this drops the caller's cancellation — confirm intended.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	var err error

	// Re-read the object to act on its latest spec/status.
	var cur appv1alpha1.AppImage
	err = r.Get(ctx, types.NamespacedName{Name: instance.Name}, &cur)
	if err != nil {
		klog.Errorf("Failed to get app manager name=%s err=%v", instance.Name, err)
		return err
	}
	// If every node already reported completion, just finalize the status.
	if areAllNodesCompleted(cur.Spec, cur.Status.Conditions) {
		klog.Infof("all node completed app %s", instance.Name)
		err = r.updateStatus(ctx, &cur, []appv1alpha1.ImageInfo{}, "completed", "completed")
		if err != nil {
			klog.Errorf("update appimage status failed %v", err)
		}
		return nil
	}

	start := time.Now()
	klog.Infof("get image app %s request start", instance.Name)

	// A failed inspection still results in a status write, with state/message
	// reflecting the error.
	state, message := "completed", "completed"
	imageInfos, err := r.GetImageInfo(ctx, &cur)
	if err != nil {
		state = "failed"
		message = err.Error()
		klog.Errorf("get image info failed %v", err)
	}

	// NOTE(review): updateStatus already retries on conflict internally, so
	// this wraps two nested retry loops — confirm that is intended.
	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		err = r.Get(ctx, types.NamespacedName{Name: instance.Name}, &cur)
		if err != nil {
			return err
		}
		err = r.updateStatus(ctx, &cur, imageInfos, state, message)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		klog.Infof("update app image status failed %v", err)
		return err
	}
	klog.Infof("get app %s image: %v info success, time elapsed: %v", instance.Name, instance.Spec.Refs, time.Since(start))
	return nil
}
|
||||
|
||||
func (r *AppImageInfoController) preEnqueueCheckForCreate(obj client.Object) bool {
|
||||
am, _ := obj.(*appv1alpha1.AppImage)
|
||||
klog.Infof("enqueue check: %v", am.Status.State)
|
||||
if am.Status.State == "completed" || am.Status.State == "failed" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *AppImageInfoController) preEnqueueCheckForUpdate(old, new client.Object) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// updateStatus re-reads the AppImage and patches its status under a
// conflict-retry loop: it sets state/message/timestamp, appends the newly
// gathered image infos, and appends a completed condition for this node.
// NOTE(review): Images and Conditions are append-only here, so repeated
// calls can accumulate duplicate entries — confirm consumers tolerate that.
func (r *AppImageInfoController) updateStatus(ctx context.Context, am *appv1alpha1.AppImage, imageInfos []appv1alpha1.ImageInfo, state, message string) error {
	var err error
	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Refresh am so the patch base is current; note this mutates the
		// caller's object.
		err = r.Get(ctx, types.NamespacedName{Name: am.Name}, am)
		if err != nil {
			return err
		}

		now := metav1.Now()
		amCopy := am.DeepCopy()
		amCopy.Status.State = state
		// "StatueTime" spelling comes from the API type definition.
		amCopy.Status.StatueTime = &now
		amCopy.Status.Images = append(amCopy.Status.Images, imageInfos...)
		amCopy.Status.Message = message
		node := os.Getenv("NODE_NAME")
		amCopy.Status.Conditions = append(amCopy.Status.Conditions, appv1alpha1.Condition{
			Node:      node,
			Completed: true,
		})

		err = r.Status().Patch(ctx, amCopy, client.MergeFrom(am))
		if err != nil {
			return err
		}
		return nil
	})

	return err
}
|
||||
|
||||
func areAllNodesCompleted(spec appv1alpha1.ImageSpec, conditions []appv1alpha1.Condition) bool {
|
||||
conditionMap := make(map[string]bool)
|
||||
for _, condition := range conditions {
|
||||
conditionMap[condition.Node] = condition.Completed
|
||||
}
|
||||
for _, node := range spec.Nodes {
|
||||
completed := conditionMap[node]
|
||||
if !completed {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func parseImageSource(ctx context.Context, name string) (imagetypes.ImageSource, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys := newSystemContext()
|
||||
return ref.NewImageSource(ctx, sys)
|
||||
}
|
||||
|
||||
func (r *AppImageInfoController) GetManifest(ctx context.Context, instance *appv1alpha1.AppImage, imageName string) (*imagetypes.ImageInspectInfo, error) {
|
||||
if instance.Annotations == nil || instance.Annotations[api.AppImagesKey] == "" {
|
||||
return r.getManifestViaNetwork(ctx, imageName)
|
||||
}
|
||||
imageInfoReqData := instance.Annotations[api.AppImagesKey]
|
||||
var imageInfoReq api.ImageInfoRequest
|
||||
err := json.Unmarshal([]byte(imageInfoReqData), &imageInfoReq)
|
||||
if err != nil {
|
||||
klog.Infof("failed to unmarshal image info %v", err)
|
||||
return r.getManifestViaNetwork(ctx, imageName)
|
||||
}
|
||||
var manifest *api.ImageInfoV2
|
||||
imageRef, err := refdocker.ParseDockerRef(imageName)
|
||||
if err != nil {
|
||||
klog.Errorf("invalid docker ref %s %v", imageName, err)
|
||||
return nil, err
|
||||
}
|
||||
for _, imageInfo := range imageInfoReq.Images {
|
||||
name, err := refdocker.ParseDockerRef(imageInfo.ImageName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, l := range imageInfo.InfoV2 {
|
||||
if name.String() == imageRef.String() {
|
||||
if l.Os == runtime.GOOS && l.Architecture == runtime.GOARCH {
|
||||
manifest = &l
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if manifest == nil || len(manifest.LayersData) == 0 {
|
||||
return r.getManifestViaNetwork(ctx, imageName)
|
||||
}
|
||||
klog.Infof("get app %s image manifest from annotations", imageName)
|
||||
return r.buildImageInspectFromManifest(manifest), nil
|
||||
}
|
||||
|
||||
// newSystemContext returns a default containers/image SystemContext; no
// registry auth or TLS overrides are configured here.
func newSystemContext() *imagetypes.SystemContext {
	return &imagetypes.SystemContext{}
}
|
||||
|
||||
// imageInfoResult carries the outcome of inspecting a single image ref
// across the worker-goroutine result channel in GetImageInfo.
type imageInfoResult struct {
	info appv1alpha1.ImageInfo // populated when err is nil
	err  error
}
|
||||
|
||||
// GetImageInfo inspects every image ref of the AppImage concurrently (at
// most five at a time) and returns per-image layer info annotated with this
// node's name and each layer's local download progress from containerd.
//
// All refs are attempted even when some fail; the first error encountered is
// returned alongside the successfully gathered infos.
func (r *AppImageInfoController) GetImageInfo(ctx context.Context, instance *appv1alpha1.AppImage) ([]appv1alpha1.ImageInfo, error) {
	nodeName := os.Getenv("NODE_NAME")

	var wg sync.WaitGroup
	// Buffered so every worker can deliver its result without blocking.
	results := make(chan imageInfoResult, len(instance.Spec.Refs))
	tokens := make(chan struct{}, 5) // concurrency limiter
	klog.Infof("refs: %d", len(instance.Spec.Refs))
	for _, originRef := range instance.Spec.Refs {
		wg.Add(1)
		go func(originRef string) {
			defer wg.Done()
			tokens <- struct{}{}
			defer func() { <-tokens }()
			name, err := refdocker.ParseDockerRef(originRef)
			if err != nil {
				klog.Errorf("invalid image ref %s %v", originRef, err)
				results <- imageInfoResult{err: err}
				return
			}
			manifest, err := r.GetManifest(ctx, instance, originRef)
			if err != nil {
				klog.Infof("get image %s manifest failed %v", name.String(), err)
				results <- imageInfoResult{err: err}
				return
			}

			imageInfo := appv1alpha1.ImageInfo{
				Node:         nodeName,
				Name:         originRef,
				Architecture: manifest.Architecture,
				Variant:      manifest.Variant,
				Os:           manifest.Os,
			}
			imageLayers := make([]appv1alpha1.ImageLayer, 0)
			for _, layer := range manifest.LayersData {
				imageLayer := appv1alpha1.ImageLayer{
					MediaType:   layer.MIMEType,
					Digest:      layer.Digest.String(),
					Size:        layer.Size,
					Annotations: layer.Annotations,
				}
				// Layer already committed to the content store: report it
				// as fully downloaded.
				_, err = r.imageClient.ContentStore().Info(ctx, layer.Digest)
				if err == nil {
					imageLayer.Offset = layer.Size
					imageLayers = append(imageLayers, imageLayer)
					// go next layer
					continue
				}
				if errors.Is(err, errdefs.ErrNotFound) {
					// Layer not committed yet: scan in-flight ingests for
					// partial progress ("layer-<digest>" is containerd's
					// ingest ref naming).
					statuses, err := r.imageClient.ContentStore().ListStatuses(ctx)
					if err != nil {
						klog.Errorf("list statuses failed %v", err)
						results <- imageInfoResult{err: err}
						return
					}
					for _, status := range statuses {
						s := "layer-" + layer.Digest.String()
						if s == status.Ref {
							imageLayer.Offset = status.Offset
							break
						}
					}
				} else {
					klog.Infof("get content info failed %v", err)
					results <- imageInfoResult{err: err}
					return
				}
				imageLayers = append(imageLayers, imageLayer)
			}
			imageInfo.LayersData = imageLayers
			results <- imageInfoResult{info: imageInfo}
		}(originRef)
	}

	wg.Wait()
	close(results)
	// Collect results: keep all successes, remember only the first error.
	imageInfos := make([]appv1alpha1.ImageInfo, 0, len(instance.Spec.Refs))
	var firstErr error
	for result := range results {
		if result.err != nil {
			if firstErr == nil {
				firstErr = result.err
			}
		} else {
			imageInfos = append(imageInfos, result.info)
		}
	}
	return imageInfos, firstErr
}
|
||||
|
||||
func (r *AppImageInfoController) buildImageInspectFromManifest(manifest *api.ImageInfoV2) *imagetypes.ImageInspectInfo {
|
||||
layersData := make([]imagetypes.ImageInspectLayer, 0, len(manifest.Layers))
|
||||
for _, layer := range manifest.LayersData {
|
||||
layersData = append(layersData, imagetypes.ImageInspectLayer{
|
||||
MIMEType: layer.MIMEType,
|
||||
Digest: layer.Digest,
|
||||
Size: layer.Size,
|
||||
})
|
||||
}
|
||||
klog.Infof("buildImageInspectFromManifest: os: %s, arch: %s", runtime.GOOS, runtime.GOARCH)
|
||||
return &imagetypes.ImageInspectInfo{
|
||||
Os: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
LayersData: layersData,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *AppImageInfoController) getManifestViaNetwork(ctx context.Context, originRef string) (*imagetypes.ImageInspectInfo, error) {
|
||||
|
||||
name, err := refdocker.ParseDockerRef(originRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
replacedRef, _ := utils.ReplacedImageRef(registry.GetMirrors(), name.String(), false)
|
||||
|
||||
var src imagetypes.ImageSource
|
||||
srcImageName := "docker://" + replacedRef
|
||||
sysCtx := newSystemContext()
|
||||
fmt.Printf("imageName: %s\n", replacedRef)
|
||||
src, err = parseImageSource(ctx, srcImageName)
|
||||
if err != nil {
|
||||
klog.Infof("parse Image Source: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
unparsedInstance := image.UnparsedInstance(src, nil)
|
||||
_, _, err = unparsedInstance.Manifest(ctx)
|
||||
if err != nil {
|
||||
klog.Infof("parse manifest: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
img, err := image.FromUnparsedImage(ctx, sysCtx, unparsedInstance)
|
||||
if err != nil {
|
||||
klog.Infof("from unparsed image: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
imgInspect, err := img.Inspect(ctx)
|
||||
if err != nil {
|
||||
klog.Infof("inspect image failed: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return imgInspect, err
|
||||
}
|
||||
5
framework/app-service/controllers/interface.go
Normal file
5
framework/app-service/controllers/interface.go
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
package controllers
|
||||
|
||||
// HelmOp abstracts a helm install operation.
// NOTE(review): the Install2 name suggests a second-generation API — confirm
// whether a plain Install exists elsewhere and whether the name is intended.
type HelmOp interface {
	Install2() error
}
|
||||
193
framework/app-service/controllers/load.go
Normal file
193
framework/app-service/controllers/load.go
Normal file
|
|
@ -0,0 +1,193 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
appevent "bytetrade.io/web3os/app-service/pkg/event"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// LoadStatefulApp loads the ApplicationManager with the given name and wraps
// it in the state-machine object matching its current Status.State.
//
// Special case: when the ApplicationManager is missing but an Application of
// the same name still exists, an "unknown state" error is returned whose
// handler force-deletes the orphaned application's namespace in a background
// goroutine (30-minute budget) and publishes uninstall events.
func LoadStatefulApp(ctx context.Context, appmgr *ApplicationManagerController, name string) (appstate.StatefulApp, appstate.StateError) {
	var am appv1alpha1.ApplicationManager
	err := appmgr.Get(ctx, types.NamespacedName{Name: name}, &am)
	if err != nil {
		if apierrors.IsNotFound(err) {

			var app appv1alpha1.Application
			if err = appmgr.Get(ctx, types.NamespacedName{Name: name}, &app); err == nil {
				klog.Infof("LoadStatefulApp: application manager %s not found, but application %s exists", name, app.Name)
				// If the application manager is not found, but the application exists,
				// we need force delete the application.
				return nil, appstate.NewErrorUnknownState(func() func(ctx context.Context) error {
					return func(ctx context.Context) error {
						// The cleanup runs detached from the caller's context
						// so a canceled reconcile does not abort it.
						go func() {
							delCtx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
							defer cancel()
							klog.Infof("LoadStatefulApp: force delete application %s", app.Name)
							// Never delete system/protected namespaces.
							if !apputils.IsProtectedNamespace(app.Spec.Namespace) {
								err := appmgr.Delete(delCtx,
									&corev1.Namespace{
										ObjectMeta: metav1.ObjectMeta{
											Name: app.Spec.Namespace,
										},
									})

								if err != nil {
									klog.Errorf("LoadStatefulApp: force delete application %s failed: %v", app.Name, err)
								} else {
									klog.Infof("LoadStatefulApp: force delete application %s successfully", app.Name)
								}
								// Publish an Uninstalling event describing the
								// delete attempt.
								opID := strconv.FormatInt(time.Now().Unix(), 10)
								appevent.PublishAppEventToQueue(utils.EventParams{
									Owner:      app.Spec.Owner,
									Name:       app.Spec.Name,
									OpType:     string(appv1alpha1.UninstallOp),
									OpID:       opID,
									State:      string(appv1alpha1.Uninstalling),
									RawAppName: app.Spec.RawAppName,
									Type:       "app",
									Title:      app.Spec.Settings["title"],
									Reason:     constants.AppForceUninstall,
									Message: func() string {
										if err != nil {
											return fmt.Sprintf("force delete application %s failed: %v", app.Name, err)
										}
										return fmt.Sprintf("force delete application %s successfully", app.Name)
									}(),
								})

								// Poll every 2s until the namespace is gone,
								// then publish the final Uninstalled event.
								ticker := time.NewTicker(2 * time.Second)
								defer ticker.Stop()
								for {
									select {
									case <-delCtx.Done():
										return
									case <-ticker.C:
										var ns corev1.Namespace
										err = appmgr.Get(delCtx, types.NamespacedName{Name: app.Spec.Namespace}, &ns)
										klog.Infof("wait for namespace: %s to be deleted", app.Spec.Namespace)
										if apierrors.IsNotFound(err) {
											appevent.PublishAppEventToQueue(utils.EventParams{
												Owner:      app.Spec.Owner,
												Name:       app.Spec.Name,
												OpType:     string(appv1alpha1.UninstallOp),
												OpID:       opID,
												State:      string(appv1alpha1.Uninstalled),
												RawAppName: app.Spec.RawAppName,
												Type:       "app",
												Title:      app.Spec.Settings["title"],
												Reason:     constants.AppForceUninstalled,
												Message:    fmt.Sprintf("app %s was force uninstalled", app.Spec.Name),
											})
											return
										}
									}
								}

							}
						}()

						return nil
					}
				}, nil)
			}
			// Neither manager nor application exists: nothing to load.
			return nil, nil
		}
		return nil, appstate.NewStateError(err.Error())
	}

	klog.Infof("LoadStatefulApp name:%s, state: %v", am.Name, am.Status.State)

	// Dispatch the current state to its state-machine constructor; timeouts
	// bound how long the app may remain in each transitional state.
	retApp, serr := func() (appstate.StatefulApp, appstate.StateError) {
		switch am.Status.State {
		case appv1alpha1.Pending:
			return appstate.NewPendingApp(ctx, appmgr, &am, 24*time.Hour)
		case appv1alpha1.Downloading:
			return appstate.NewDownloadingApp(appmgr, &am, 24*time.Hour)
		case appv1alpha1.Installing:
			return appstate.NewInstallingApp(appmgr, &am, 30*time.Minute)
		case appv1alpha1.Initializing:
			return appstate.NewInitializingApp(appmgr, &am, 60*time.Minute)
		case appv1alpha1.Running:
			return appstate.NewRunningApp(ctx, appmgr, &am)
		case appv1alpha1.Stopping:
			return appstate.NewSuspendingApp(appmgr, &am, 30*time.Minute)
		case appv1alpha1.Upgrading:
			return appstate.NewUpgradingApp(appmgr, &am, 30*time.Minute)
		case appv1alpha1.ApplyingEnv:
			return appstate.NewApplyingEnvApp(appmgr, &am, 30*time.Minute)
		case appv1alpha1.Resuming:
			return appstate.NewResumingApp(appmgr, &am, 60*time.Minute)
		case appv1alpha1.PendingCanceling:
			return appstate.NewPendingCancelingApp(appmgr, &am)
		case appv1alpha1.DownloadingCanceling:
			return appstate.NewDownloadingCancelingApp(appmgr, &am)
		case appv1alpha1.InstallingCanceling:
			return appstate.NewInstallingCancelingApp(appmgr, &am, 30*time.Minute)
		case appv1alpha1.InitializingCanceling:
			return appstate.NewInitializingCancelingApp(appmgr, &am)
		case appv1alpha1.ResumingCanceling:
			return appstate.NewResumingCancelingApp(appmgr, &am)
		case appv1alpha1.UpgradingCanceling:
			return appstate.NewUpgradingCancelingApp(appmgr, &am)
		case appv1alpha1.ApplyingEnvCanceling:
			return appstate.NewApplyingEnvCancelingApp(appmgr, &am)
		case appv1alpha1.Uninstalling:
			return appstate.NewUninstallingApp(appmgr, &am, 15*time.Minute)
		case appv1alpha1.StopFailed:
			return appstate.NewSuspendFailedApp(appmgr, &am)
		case appv1alpha1.UninstallFailed:
			return appstate.NewUninstallFailedApp(appmgr, &am)
		case appv1alpha1.UpgradeFailed:
			return appstate.NewUpgradeFailedApp(appmgr, &am)
		case appv1alpha1.ApplyEnvFailed:
			return appstate.NewApplyEnvFailedApp(appmgr, &am)
		case appv1alpha1.ResumeFailed:
			return appstate.NewResumeFailedApp(appmgr, &am)

		// Terminal / settled states need no active state machine.
		case appv1alpha1.DownloadFailed,
			appv1alpha1.PendingCanceled, appv1alpha1.DownloadingCanceled,
			appv1alpha1.InstallingCanceled, appv1alpha1.InitializingCanceled,
			appv1alpha1.UpgradingCanceled, appv1alpha1.ApplyingEnvCanceled,
			appv1alpha1.ResumingCanceled, appv1alpha1.Stopped:
			return appstate.NewDoNothingApp(appmgr, &am)
		case appv1alpha1.InstallFailed:
			return appstate.NewInstallFailedApp(appmgr, &am)
		case appv1alpha1.PendingCancelFailed:
			return appstate.NewPendingCancelFailedApp(appmgr, &am)
		case appv1alpha1.DownloadingCancelFailed:
			return appstate.NewDownloadingCancelFailedApp(appmgr, &am)

		case appv1alpha1.InstallingCancelFailed:
			return appstate.NewInstallingCancelFailedApp(appmgr, &am)
		case appv1alpha1.UpgradingCancelFailed:
			return appstate.NewUpgradingCancelFailedApp(appmgr, &am)
		case appv1alpha1.ApplyingEnvCancelFailed:
			return appstate.NewApplyingEnvCancelFailedApp(appmgr, &am)
		case appv1alpha1.Uninstalled:
			return appstate.NewUninstalledApp(ctx, appmgr, &am)
		}

		// Any state not listed above is unknown.
		return nil, appstate.NewErrorUnknownState(nil, nil)
	}()

	if serr != nil {
		klog.Infof("load stateful app name=%s, state=%s failed err %v", am.Name, am.Status.State, serr)
		return nil, serr
	}

	return retApp, nil
}
|
||||
142
framework/app-service/controllers/namespace_controller.go
Normal file
142
framework/app-service/controllers/namespace_controller.go
Normal file
|
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/utils/sliceutil"
|
||||
|
||||
iamv1alpha2 "github.com/beclab/api/iam/v1alpha2"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
)
|
||||
|
||||
const (
|
||||
controllerNamespaceName = "namespace-controller"
|
||||
)
|
||||
|
||||
// NamespaceReconciler reconciles a Namespace object: it attaches a
// finalizer and namespace label to live namespaces and grants the creating
// user admin rights via a RoleBinding.
type NamespaceReconciler struct {
	// Client is the controller-runtime client; defaulted from the manager
	// in SetupWithManager when left nil.
	client.Client
}
|
||||
|
||||
func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
if r.Client == nil {
|
||||
r.Client = mgr.GetClient()
|
||||
}
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Named(controllerNamespaceName).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: 1,
|
||||
}).
|
||||
For(&corev1.Namespace{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
rootCtx := context.Background()
|
||||
namespace := &corev1.Namespace{}
|
||||
if err := r.Get(rootCtx, req.NamespacedName, namespace); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
// name of your custom finalizer
|
||||
//finalizer := "finalizers.kubesphere.io/namespaces"
|
||||
|
||||
if namespace.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
// The object is not being deleted, so if it does not have our finalizer,
|
||||
// then lets add the finalizer and update the object.
|
||||
if !sliceutil.HasString(namespace.ObjectMeta.Finalizers, namespaceFinalizer) {
|
||||
// create only once, ignore already exists error
|
||||
if err := r.initCreatorRoleBinding(rootCtx, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
namespace.ObjectMeta.Finalizers = append(namespace.ObjectMeta.Finalizers, namespaceFinalizer)
|
||||
if namespace.Labels == nil {
|
||||
namespace.Labels = make(map[string]string)
|
||||
}
|
||||
// used for NetworkPolicyPeer.NamespaceSelector
|
||||
namespace.Labels["bytetrade.io/namespace"] = namespace.Name
|
||||
if err := r.Update(rootCtx, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The object is being deleted
|
||||
if sliceutil.HasString(namespace.ObjectMeta.Finalizers, namespaceFinalizer) {
|
||||
// remove our finalizer from the list and update it.
|
||||
namespace.ObjectMeta.Finalizers = sliceutil.RemoveString(namespace.ObjectMeta.Finalizers, func(item string) bool {
|
||||
return item == namespaceFinalizer
|
||||
})
|
||||
if err := r.Update(rootCtx, namespace); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
// Our finalizer has finished, so the reconciler can do nothing.
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *NamespaceReconciler) initCreatorRoleBinding(ctx context.Context, namespace *corev1.Namespace) error {
|
||||
creator := namespace.Annotations[creator]
|
||||
if creator == "" {
|
||||
return nil
|
||||
}
|
||||
var user iamv1alpha2.User
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: creator}, &user); err != nil {
|
||||
return client.IgnoreNotFound(err)
|
||||
}
|
||||
creatorRoleBinding := newCreatorRoleBinding(creator, namespace.Name)
|
||||
if err := r.Client.Create(ctx, creatorRoleBinding); err != nil {
|
||||
if errors.IsAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCreatorRoleBinding(creator string, namespace string) *rbacv1.RoleBinding {
|
||||
return &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", creator, iamv1alpha2.NamespaceAdmin),
|
||||
Labels: map[string]string{iamv1alpha2.UserReferenceLabel: creator},
|
||||
Namespace: namespace,
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: rbacv1.GroupName,
|
||||
Kind: iamv1alpha2.ResourceKindRole,
|
||||
Name: iamv1alpha2.NamespaceAdmin,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Name: creator,
|
||||
Kind: iamv1alpha2.ResourceKindUser,
|
||||
APIGroup: rbacv1.GroupName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
249
framework/app-service/controllers/node_alert_controller.go
Normal file
249
framework/app-service/controllers/node_alert_controller.go
Normal file
|
|
@ -0,0 +1,249 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
// NodePressureType represents the type of node pressure being tracked.
type NodePressureType string

// Supported pressure types; the string values mirror the corresponding
// corev1.NodeConditionType names (see checkPressureStateChange).
const (
	MemoryPressure NodePressureType = "MemoryPressure"
	DiskPressure   NodePressureType = "DiskPressure"
	PIDPressure    NodePressureType = "PIDPressure"
)
|
||||
|
||||
// alertPayload is the JSON body of a node alert message.
type alertPayload struct {
	NodeName     string           `json:"nodeName"`
	PressureType NodePressureType `json:"pressureType"`
	Timestamp    time.Time        `json:"timestamp"`
	Message      string           `json:"message"`
	// Status is true while the node is under pressure, false on recovery.
	Status bool `json:"status"`
}

// NodeAlertEvent is the envelope published to NATS: a topic (the pressure
// type) plus the alert payload.
type NodeAlertEvent struct {
	Topic   NodePressureType `json:"topic"`
	Payload alertPayload     `json:"payload"`
}
|
||||
|
||||
// NodeAlertController reconciles Node objects and publishes alerts when a
// node enters, stays in, or leaves a pressure condition.
type NodeAlertController struct {
	client.Client
	KubeConfig *rest.Config
	// lastAlertTime tracks the last time an alert was sent for each
	// node/pressure-type key; used to rate-limit repeat alerts.
	lastAlertTime map[string]time.Time
	// lastPressureState tracks the last known pressure state for each node
	// and pressure type, so transitions can be detected.
	lastPressureState map[string]bool
	// mutex guards lastAlertTime and lastPressureState.
	mutex sync.RWMutex
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager: a single-worker
// controller that watches Node create/update events (deletes are ignored,
// since a removed node needs no alerting).
func (r *NodeAlertController) SetupWithManager(mgr ctrl.Manager) error {
	c, err := controller.New("node-alert-controller", mgr, controller.Options{
		// Serial reconciles; the lazily-created state maps rely on this.
		MaxConcurrentReconciles: 1,
		Reconciler:              r,
	})
	if err != nil {
		klog.Errorf("node-alert-controller setup failed %v", err)
		return fmt.Errorf("node-alert-controller setup failed %w", err)
	}

	// Enqueue one cluster-scoped request (node name only) per event.
	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&corev1.Node{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, node *corev1.Node) []reconcile.Request {
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name: node.GetName(),
				}}}
			}),
		predicate.TypedFuncs[*corev1.Node]{
			CreateFunc: func(e event.TypedCreateEvent[*corev1.Node]) bool {
				return true
			},
			UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Node]) bool {
				return true
			},
			DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Node]) bool {
				return false
			},
		},
	))

	if err != nil {
		klog.Errorf("node-alert-controller add watch failed %v", err)
		return fmt.Errorf("add watch failed %w", err)
	}

	return nil
}
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop
|
||||
func (r *NodeAlertController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
klog.Infof("start reconcile node %s", req.Name)
|
||||
|
||||
if r.lastAlertTime == nil {
|
||||
r.lastAlertTime = make(map[string]time.Time)
|
||||
}
|
||||
if r.lastPressureState == nil {
|
||||
r.lastPressureState = make(map[string]bool)
|
||||
}
|
||||
|
||||
node := &corev1.Node{}
|
||||
err := r.Get(ctx, req.NamespacedName, node)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get node %s: %v", req.Name, err)
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
err = r.checkNodePressure(node)
|
||||
if err != nil {
|
||||
klog.Errorf("check node pressure failed %v", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
klog.Infof("finished reconcile node %s", req.Name)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// checkNodePressure checks for various pressure conditions on the node
|
||||
func (r *NodeAlertController) checkNodePressure(node *corev1.Node) error {
|
||||
pressureTypes := []NodePressureType{MemoryPressure, DiskPressure, PIDPressure}
|
||||
|
||||
for _, pressureType := range pressureTypes {
|
||||
err := r.checkPressureStateChange(node, pressureType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkPressureStateChange checks for pressure state changes and sends alerts accordingly
|
||||
func (r *NodeAlertController) checkPressureStateChange(node *corev1.Node, pressureType NodePressureType) error {
|
||||
currentPressure := false
|
||||
conditionMessage := ""
|
||||
|
||||
for _, condition := range node.Status.Conditions {
|
||||
var conditionType corev1.NodeConditionType
|
||||
switch pressureType {
|
||||
case MemoryPressure:
|
||||
conditionType = corev1.NodeMemoryPressure
|
||||
case DiskPressure:
|
||||
conditionType = corev1.NodeDiskPressure
|
||||
case PIDPressure:
|
||||
conditionType = corev1.NodePIDPressure
|
||||
}
|
||||
|
||||
if condition.Type == conditionType {
|
||||
currentPressure = condition.Status == corev1.ConditionTrue
|
||||
conditionMessage = condition.Message
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
r.mutex.Lock()
|
||||
defer r.mutex.Unlock()
|
||||
|
||||
key := fmt.Sprintf("%s-%s", node.Name, pressureType)
|
||||
lastPressure, _ := r.lastPressureState[key]
|
||||
if lastPressure != currentPressure {
|
||||
if currentPressure {
|
||||
// from available to pressure
|
||||
err := r.sendNodeAlert(node.Name, pressureType, conditionMessage, true)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to publish available to pressure, type: %s, err: %v", pressureType, err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// from pressure to available
|
||||
err := r.sendNodeAlert(node.Name, pressureType, conditionMessage, false)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to publish pressure to available, type: %s, err: %v", pressureType, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if currentPressure {
|
||||
// pressure persists
|
||||
if r.shouldSendAlert(node.Name, pressureType) {
|
||||
err := r.sendNodeAlert(node.Name, pressureType, conditionMessage, true)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to publish persists pressure, type: %s, err: %v", pressureType, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
r.lastPressureState[key] = currentPressure
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldSendAlert checks if enough time has passed since the last alert for this pressure type
|
||||
func (r *NodeAlertController) shouldSendAlert(nodeName string, pressureType NodePressureType) bool {
|
||||
key := fmt.Sprintf("%s-%s", nodeName, pressureType)
|
||||
lastTime, exists := r.lastAlertTime[key]
|
||||
if !exists {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if 60 minutes has passed since the last alert
|
||||
return time.Since(lastTime) >= 60*time.Minute
|
||||
}
|
||||
|
||||
// sendNodeAlertUnlocked sends an alert message to NATS
|
||||
func (r *NodeAlertController) sendNodeAlert(nodeName string, pressureType NodePressureType, message string, isPressure bool) error {
|
||||
key := fmt.Sprintf("%s-%s", nodeName, pressureType)
|
||||
|
||||
status := false
|
||||
if isPressure {
|
||||
status = true
|
||||
}
|
||||
|
||||
data := NodeAlertEvent{
|
||||
Topic: pressureType,
|
||||
Payload: alertPayload{
|
||||
NodeName: nodeName,
|
||||
PressureType: pressureType,
|
||||
Timestamp: time.Now(),
|
||||
Message: message,
|
||||
Status: status,
|
||||
},
|
||||
}
|
||||
|
||||
if err := r.publishToNats("os.notification", data); err != nil {
|
||||
klog.Errorf("failed to publish node alert to NATS: %v", err)
|
||||
return err
|
||||
} else {
|
||||
if isPressure {
|
||||
klog.Infof("successfully published node pressure alert for %s: %s", nodeName, pressureType)
|
||||
} else {
|
||||
klog.Infof("successfully published node pressure recovery for %s: %s", nodeName, pressureType)
|
||||
}
|
||||
}
|
||||
if isPressure {
|
||||
r.lastAlertTime[key] = time.Now()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// publishToNats publishes a message to the specified NATS subject via the
// shared utils helper; connection handling lives there.
func (r *NodeAlertController) publishToNats(subject string, data interface{}) error {
	return utils.PublishToNats(subject, data)
}
|
||||
|
|
@ -0,0 +1,269 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch
|
||||
//+kubebuilder:rbac:groups=app.bytetrade.io,resources=applicationmanagers,verbs=get;list;watch;update;patch
|
||||
//+kubebuilder:rbac:groups=app.bytetrade.io,resources=applicationmanagers/status,verbs=get;update;patch
|
||||
|
||||
const (
	// envPendingPodSuspendAppTimeout names the env var that overrides how
	// long a pod may stay Pending/Unschedulable before its app is suspended
	// (parsed by parsePendingTimeout; defaults to 3m).
	envPendingPodSuspendAppTimeout = "PENDING_POD_SUSPEND_APP_TIMEOUT"
)
|
||||
|
||||
// PodAbnormalSuspendAppController watches Pods belonging to applications and
// suspends the app when a Pod is Evicted, or when Pending and Unschedulable
// beyond a timeout.
type PodAbnormalSuspendAppController struct {
	client.Client
	// pendingTimeout is how long a pod may remain pending/unschedulable
	// before the owning app is stopped; resolved in SetUpWithManager via
	// parsePendingTimeout.
	pendingTimeout time.Duration
}
|
||||
|
||||
// SetUpWithManager wires the controller into the manager and starts a watch
// on Pods that carry both app labels; delete events are ignored.
// NOTE(review): the name deviates from the "SetupWithManager" spelling used
// by the other controllers in this package — confirm callers before renaming.
func (r *PodAbnormalSuspendAppController) SetUpWithManager(mgr ctrl.Manager) error {
	// Resolve the pending-suspend timeout once, from the environment.
	r.pendingTimeout = parsePendingTimeout(os.Getenv(envPendingPodSuspendAppTimeout))

	c, err := controller.New("pod-abnormal-suspend-app-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}

	klog.Infof("pod-abnormal-suspend-app-controller initialized, pendingTimeout=%v", r.pendingTimeout)

	// Only pods with both app labels are enqueued; create/update pass the
	// predicate, deletes are dropped.
	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&corev1.Pod{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, pod *corev1.Pod) []reconcile.Request {
				if !hasRequiredAppLabels(pod) {
					return nil
				}
				klog.Infof("enqueue pod for abnormal check name=%s namespace=%s app=%s owner=%s", pod.Name, pod.Namespace, pod.GetLabels()[constants.ApplicationNameLabel], pod.GetLabels()[constants.ApplicationOwnerLabel])
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name:      pod.Name,
					Namespace: pod.Namespace,
				}}}
			}),
		predicate.TypedFuncs[*corev1.Pod]{
			CreateFunc: func(e event.TypedCreateEvent[*corev1.Pod]) bool {
				return hasRequiredAppLabels(e.Object)
			},
			UpdateFunc: func(e event.TypedUpdateEvent[*corev1.Pod]) bool {
				return hasRequiredAppLabels(e.ObjectNew)
			},
			DeleteFunc: func(e event.TypedDeleteEvent[*corev1.Pod]) bool {
				return false
			},
		},
	))
	if err != nil {
		klog.Errorf("pod-abnormal-suspend-app-controller failed to watch err=%v", err)
		return err
	}
	return nil
}
|
||||
|
||||
func hasRequiredAppLabels(pod *corev1.Pod) bool {
|
||||
if pod == nil {
|
||||
return false
|
||||
}
|
||||
name := pod.GetLabels()[constants.ApplicationNameLabel]
|
||||
owner := pod.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
return name != "" && owner != ""
|
||||
}
|
||||
|
||||
// Reconcile handles a single pod event: it suspends the owning app when the
// pod was Evicted, or when it has been Pending and Unschedulable longer than
// r.pendingTimeout; otherwise it requeues or exits without action.
func (r *PodAbnormalSuspendAppController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var pod corev1.Pod
	if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	klog.Infof("reconcile pod name=%s namespace=%s phase=%s reason=%s", pod.Name, pod.Namespace, pod.Status.Phase, pod.Status.Reason)

	// Both labels are required to map the pod back to an application.
	appName := pod.GetLabels()[constants.ApplicationNameLabel]
	owner := pod.GetLabels()[constants.ApplicationOwnerLabel]
	if appName == "" || owner == "" {
		klog.Infof("ignore pod name=%s namespace=%s due to missing app labels", pod.Name, pod.Namespace)
		return ctrl.Result{}, nil
	}

	// A pod already being deleted is not an abnormality to act on.
	if pod.DeletionTimestamp != nil {
		klog.Infof("ignore pod name=%s namespace=%s because it is being deleted", pod.Name, pod.Namespace)
		return ctrl.Result{}, nil
	}

	// Case 1: the pod was evicted — try to suspend the app immediately.
	if pod.Status.Reason == "Evicted" {
		klog.Infof("pod evicted name=%s namespace=%s, attempting to suspend app=%s owner=%s", pod.Name, pod.Namespace, appName, owner)
		ok, err := r.trySuspendApp(ctx, owner, appName, constants.AppStopDueToEvicted, "evicted pod: "+pod.Namespace+"/"+pod.Name)
		if err != nil {
			klog.Errorf("suspend attempt failed for app=%s owner=%s: %v", appName, owner, err)
			return ctrl.Result{}, err
		}
		if !ok {
			// The app state machine does not allow a stop yet; retry shortly.
			klog.Infof("app not suspendable yet app=%s owner=%s, requeue after 5s", appName, owner)
			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
		}
		return ctrl.Result{}, nil
	}

	// A scheduled pod cannot be pending/unschedulable; nothing more to check.
	if isScheduled(&pod) {
		klog.Infof("ignore pod name=%s namespace=%s because it is scheduled", pod.Name, pod.Namespace)
		return ctrl.Result{}, nil
	}

	// Case 2: pending and unschedulable — suspend only after the timeout.
	if pendingSince, found := pendingUnschedulableSince(&pod); found {
		elapsed := time.Since(pendingSince)
		klog.Infof("pod pending unschedulable name=%s namespace=%s since=%s elapsed=%v timeout=%v", pod.Name, pod.Namespace, pendingSince.Format(time.RFC3339), elapsed, r.pendingTimeout)
		if elapsed < r.pendingTimeout {
			// Not timed out yet; requeue exactly when the timeout will expire.
			delay := r.pendingTimeout - elapsed
			klog.Infof("requeue pod name=%s namespace=%s after %v until timeout", pod.Name, pod.Namespace, delay)
			return ctrl.Result{RequeueAfter: r.pendingTimeout - elapsed}, nil
		}

		klog.Infof("attempting to suspend app=%s owner=%s due to pending unschedulable timeout", appName, owner)
		ok, err := r.trySuspendApp(ctx, owner, appName, constants.AppUnschedulable, "pending unschedulable timeout on pod: "+pod.Namespace+"/"+pod.Name)
		if err != nil {
			klog.Errorf("suspend attempt failed for app=%s owner=%s: %v", appName, owner, err)
			return ctrl.Result{}, err
		}
		if !ok {
			klog.Infof("app not suspendable yet app=%s owner=%s, requeue after 5s", appName, owner)
			return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
		}
	}

	return ctrl.Result{}, nil
}
|
||||
|
||||
func isScheduled(pod *corev1.Pod) bool {
|
||||
if pod == nil {
|
||||
return false
|
||||
}
|
||||
if pod.Status.Phase != corev1.PodPending {
|
||||
return true
|
||||
}
|
||||
for _, c := range pod.Status.Conditions {
|
||||
if c.Type == corev1.PodScheduled {
|
||||
return c.Status == corev1.ConditionTrue
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func pendingUnschedulableSince(pod *corev1.Pod) (time.Time, bool) {
|
||||
for _, c := range pod.Status.Conditions {
|
||||
if c.Type == corev1.PodScheduled && c.Status == corev1.ConditionFalse && c.Reason == corev1.PodReasonUnschedulable {
|
||||
if !c.LastTransitionTime.IsZero() {
|
||||
return c.LastTransitionTime.Time, true
|
||||
}
|
||||
// fallback to creation time if transition is missing
|
||||
return pod.CreationTimestamp.Time, true
|
||||
}
|
||||
}
|
||||
return time.Time{}, false
|
||||
}
|
||||
|
||||
// trySuspendApp attempts to suspend the app and returns (true, nil) if a
// suspend request was issued (or the app is already Stopped).
// If the app is not suspendable yet, returns (false, nil) to trigger a short requeue.
// On success it sets Spec.OpType to StopOp, writes a Stopping status with a
// fresh operation ID, and publishes an app event carrying reason/message.
func (r *PodAbnormalSuspendAppController) trySuspendApp(ctx context.Context, owner, appName, reason, message string) (bool, error) {
	name, err := apputils.FmtAppMgrName(appName, owner, "")
	if err != nil {
		klog.Errorf("failed to format app manager name app=%s owner=%s: %v", appName, owner, err)
		return false, err
	}
	var am appv1alpha1.ApplicationManager
	if err := r.Get(ctx, types.NamespacedName{Name: name}, &am); err != nil {
		if apierrors.IsNotFound(err) {
			// No manager object: nothing to suspend, but not an error either.
			klog.Infof("applicationmanager not found name=%s for app=%s owner=%s", name, appName, owner)
			return false, nil
		}
		klog.Errorf("failed to get applicationmanager name=%s for app=%s owner=%s: %v", name, appName, owner, err)
		return false, err
	}
	// Already stopped: report success so the caller does not requeue.
	if am.Status.State == appv1alpha1.Stopped {
		return true, nil
	}

	// The app state machine may forbid stopping right now (e.g. mid-operation).
	if !appstate.IsOperationAllowed(am.Status.State, appv1alpha1.StopOp) {
		klog.Infof("operation StopOp not allowed in state=%s for app=%s owner=%s", am.Status.State, appName, owner)
		return false, nil
	}

	// Request the stop via the spec first...
	am.Spec.OpType = appv1alpha1.StopOp
	if err := r.Update(ctx, &am); err != nil {
		klog.Errorf("failed to update applicationmanager spec to StopOp name=%s app=%s owner=%s: %v", am.Name, appName, owner, err)
		return false, err
	}

	// ...then record the Stopping status under a fresh operation ID.
	opID := strconv.FormatInt(time.Now().Unix(), 10)
	now := metav1.Now()
	status := appv1alpha1.ApplicationManagerStatus{
		OpType:     appv1alpha1.StopOp,
		OpID:       opID,
		State:      appv1alpha1.Stopping,
		StatusTime: &now,
		UpdateTime: &now,
		Reason:     reason,
		Message:    message,
	}
	if _, err := apputils.UpdateAppMgrStatus(name, status); err != nil {
		return false, err
	}

	// Notify listeners that the app is stopping.
	utils.PublishAppEvent(utils.EventParams{
		Owner:      am.Spec.AppOwner,
		Name:       am.Spec.AppName,
		OpType:     string(status.OpType),
		OpID:       opID,
		State:      appv1alpha1.Stopping.String(),
		Progress:   message,
		RawAppName: am.Spec.RawAppName,
		Type:       "app",
		Title:      apputils.AppTitle(am.Spec.Config),
		Reason:     reason,
		Message:    message,
	})
	klog.Infof("suspend requested for app=%s owner=%s, reason=%s", am.Spec.AppName, am.Spec.AppOwner, message)
	return true, nil
}
|
||||
|
||||
func parsePendingTimeout(v string) time.Duration {
|
||||
if v == "" {
|
||||
klog.Infof("%s not set, using default 3m", envPendingPodSuspendAppTimeout)
|
||||
return 3 * time.Minute
|
||||
}
|
||||
d, err := time.ParseDuration(v)
|
||||
if err != nil || d <= 0 {
|
||||
klog.Warningf("invalid %s value %q, using default 3m", envPendingPodSuspendAppTimeout, v)
|
||||
return 3 * time.Minute
|
||||
}
|
||||
klog.Infof("%s set to %v", envPendingPodSuspendAppTimeout, d)
|
||||
return d
|
||||
}
|
||||
935
framework/app-service/controllers/security_controller.go
Normal file
935
framework/app-service/controllers/security_controller.go
Normal file
|
|
@ -0,0 +1,935 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/appcfg"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/security"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/thoas/go-funk"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
netv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
const (
	// controllerName is the registered name of the security controller.
	controllerName = "security-controller"
)

// SecurityReconciler represents a reconciler for managing security: it keeps
// namespace security labels and NetworkPolicies in sync in response to
// namespace, workload, and node events.
type SecurityReconciler struct {
	client.Client
	Scheme        *runtime.Scheme
	Logger        *logr.Logger
	DynamicClient dynamic.Interface
}
|
||||
|
||||
// loggerKeyType is an unexported, file-private context key type, so the
// logger value stored by Reconcile cannot collide with a bare struct{} key
// set by any other package (all struct{} values compare equal).
type loggerKeyType struct{}

// loggerKey is the context key under which Reconcile stashes its scoped logger.
var loggerKey loggerKeyType
|
||||
|
||||
// SetupWithManager sets up the SecurityReconciler with the provided controller manager
|
||||
// SetupWithManager sets up the SecurityReconciler with the provided controller
// manager. It reconciles namespaces and additionally watches NetworkPolicies,
// Deployments, StatefulSets, Nodes, and (when the CRD exists) kubeblocks
// Clusters, mapping each event to the enclosing namespace.
func (r *SecurityReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
	if r.Client == nil {
		r.Client = mgr.GetClient()
	}
	if r.Logger == nil {
		l := ctrl.Log.WithName("controllers").WithName(controllerName)
		r.Logger = &l
	}
	c, err := ctrl.NewControllerManagedBy(mgr).
		Named(controllerName).
		For(&corev1.Namespace{}).
		Build(r)

	if err != nil {
		return err
	}

	// watch the networkpolicy, enqueue a request for its namespace
	err = c.Watch(source.Kind(
		mgr.GetCache(),
		&netv1.NetworkPolicy{},
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, np *netv1.NetworkPolicy) []reconcile.Request {
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name: np.GetNamespace(),
				}}}
			})))

	if err != nil {
		return err
	}

	watches := []client.Object{
		&appsv1.Deployment{},
		&appsv1.StatefulSet{},
		&corev1.Node{},
	}

	// watch the object installed by app-installer
	for _, w := range watches {
		if err = r.addWatch(ctx, c, mgr, w); err != nil {
			return err
		}
	}

	// Only add the kubeblocks watch when the CRD is actually registered.
	// NOTE(review): the skip message says "CronWorkflow" but the mapping
	// probed here is apps.kubeblocks.io/v1 Cluster — confirm which is meant.
	if _, err := mgr.GetRESTMapper().RESTMapping(schema.GroupKind{Group: "apps.kubeblocks.io", Kind: "Cluster"}, "v1"); err == nil {
		if err = r.addCronWorkflowWatch(ctx, c, mgr); err != nil {
			return err
		}
	} else {
		r.Logger.Info("CronWorkflow CRD not installed, skip adding watch")
	}
	return nil
}
|
||||
|
||||
// addCronWorkflowWatch watches apps.kubeblocks.io/v1 Cluster objects (via an
// unstructured type, since the CRD's Go types are not compiled in) and
// enqueues a reconcile request for each object's namespace.
// NOTE(review): despite the name, the watched kind is Cluster, not
// CronWorkflow — confirm the intended kind.
func (r *SecurityReconciler) addCronWorkflowWatch(ctx context.Context, c controller.Controller, mgr ctrl.Manager) error {
	u := &unstructuredv1.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   "apps.kubeblocks.io",
		Version: "v1",
		Kind:    "Cluster",
	})

	return c.Watch(source.Kind(
		mgr.GetCache(),
		u,
		handler.TypedEnqueueRequestsFromMapFunc(
			func(ctx context.Context, h *unstructuredv1.Unstructured) []reconcile.Request {
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name: h.GetNamespace(),
				}}}
			}),
	))
}
|
||||
|
||||
// addWatch wires a watch for the given object kind, mapping events to
// namespace-keyed reconcile requests: Node events fan out to every namespace
// that must allow node tunnel IPs, Namespace events map to themselves, and
// everything else maps to its own namespace. Events are filtered to node
// changes, app workloads, and workflows.
func (r *SecurityReconciler) addWatch(ctx context.Context, c controller.Controller, mgr ctrl.Manager, watchedObject client.Object) error {
	return c.Watch(source.Kind(
		mgr.GetCache(),
		watchedObject,
		handler.EnqueueRequestsFromMapFunc(
			func(ctx context.Context, h client.Object) []reconcile.Request {
				if _, ok := h.(*corev1.Node); ok {
					r.Logger.Info("node event fired, modify network policy to add node tunnel ip")
					if reqs, err := r.namespacesShouldAllowNodeTunnel(ctx); err == nil {
						return reqs
					}
					// Lookup failed: enqueue nothing rather than guessing.
					return nil
				}

				if _, ok := h.(*corev1.Namespace); ok {
					return []reconcile.Request{{NamespacedName: types.NamespacedName{
						Name: h.GetName(),
					}}}
				}

				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name: h.GetNamespace(),
				}}}
			}),
		predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				return isNodeChanged(e.ObjectNew, e.ObjectOld) || isApp(e.ObjectNew, e.ObjectOld) || isWorkflow(e.ObjectNew, e.ObjectOld)
			},
			CreateFunc: func(e event.CreateEvent) bool {
				return isNodeChanged(e.Object) || isApp(e.Object) || isWorkflow(e.Object)
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				return isNodeChanged(e.Object) || isApp(e.Object) || isWorkflow(e.Object)
			},
		}))
}
|
||||
|
||||
// Reconcile implements the reconciliation loop for the SecurityReconciler:
// it keeps the namespace's security labels and its NetworkPolicy in sync.
// Conflict errors trigger a requeue instead of surfacing as failures.
func (r *SecurityReconciler) Reconcile(rootCtx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := r.Logger.WithValues("namespace", req.NamespacedName)
	// Stash the scoped logger in the context for the helper methods.
	ctx := context.WithValue(rootCtx, loggerKey, logger)

	namespace := &corev1.Namespace{}
	if err := r.Get(ctx, req.NamespacedName, namespace); err != nil {
		logger.Error(err, "Failed to get namespace")
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	logger.Info("namespace reconcile request")

	if namespace.ObjectMeta.DeletionTimestamp.IsZero() {
		// When a new namespace that's not a specific one (system, user internal) was created,
		// we don't give it any labels until the app installer deploys the pods.
		// non-labels namespace can't access any other namespace's network
		if err := r.reconcileNamespaceLabels(ctx, namespace); err != nil {
			if apierrors.IsConflict(err) {
				logger.Info("Conflict while update namespace labels.")
				return ctrl.Result{Requeue: true}, nil
			}
			return ctrl.Result{}, err
		}
	}

	if err := r.reconcileNetworkPolicy(ctx, namespace); err != nil {
		if apierrors.IsConflict(err) {
			logger.Info("Conflict while update namespace network policy.")
			return ctrl.Result{Requeue: true}, nil
		}
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
|
||||
|
||||
// reconcileNamespaceLabels ensures the security labels on ns match its role
// (system / network / protected / user-internal / user-space / app-owned /
// shared / middleware). The if/else-if chain is ordered by precedence: the
// well-known OS and user namespaces are classified purely by name, while any
// other namespace is classified by looking up its owning workload via
// findOwnerOfNamespace. The namespace object is only written back when at
// least one label actually changed.
func (r *SecurityReconciler) reconcileNamespaceLabels(ctx context.Context, ns *corev1.Namespace) error {
	// The logger was stored in the context by Reconcile under loggerKey.
	logger := ctx.Value(loggerKey).(logr.Logger)
	// updated tracks whether any label mutation requires an Update call.
	updated := false
	if ns.Labels == nil {
		ns.Labels = make(map[string]string)
	}

	if security.IsOSSystemNamespace(ns.Name) ||
		security.IsUnderLayerNamespace(ns.Name) ||
		security.IsOSGpuNamespace(ns.Name) {
		// make underlay namespaces can access other namespaces' network
		// especially for prometheus exporters
		if label, ok := ns.Labels[security.NamespaceTypeLabel]; !ok || label != security.System {
			ns.Labels[security.NamespaceTypeLabel] = security.System
			updated = true
		}
	} else if security.IsOSNetworkNamespace(ns.Name) {
		// make os network namespace can access other namespaces' network
		if label, ok := ns.Labels[security.NamespaceTypeLabel]; !ok || label != security.Network {
			ns.Labels[security.NamespaceTypeLabel] = security.Network
			updated = true
		}
	} else if security.IsOSProtectedNamespace(ns.Name) {
		// make os protected namespace can access other namespaces' network
		if label, ok := ns.Labels[security.NamespaceTypeLabel]; !ok || label != security.Protected {
			ns.Labels[security.NamespaceTypeLabel] = security.Protected
			updated = true
		}
	} else if ok, owner := security.IsUserInternalNamespaces(ns.Name); ok {
		// only user-system-<owner> namespace can access other namespaces' network
		if security.IsUserSystemNamespaces(ns.Name) {
			if label, ok := ns.Labels[security.NamespaceTypeLabel]; !ok || label != security.Internal {
				ns.Labels[security.NamespaceTypeLabel] = security.Internal
				updated = true
			}
		}

		if security.IsUserSpaceNamespaces(ns.Name) {
			if label, ok := ns.Labels[security.NamespaceTypeLabel]; !ok || label != security.UserSpace {
				ns.Labels[security.NamespaceTypeLabel] = security.UserSpace
				updated = true
			}
		}

		// Both user-system and user-space namespaces carry the owner label.
		if label, ok := ns.Labels[security.NamespaceOwnerLabel]; !ok || label != owner {
			ns.Labels[security.NamespaceOwnerLabel] = owner
			updated = true
		}
	} else {
		// Not a well-known namespace: derive owner/classification from the
		// workloads (and labels) found inside the namespace.
		owner, internal, system, shared, isMiddleware, err := r.findOwnerOfNamespace(ctx, ns)
		if err != nil {
			klog.Errorf("Failed to find owner of namespace %s: %v", ns.Name, err)
			return err
		}

		logger.Info("find owner of namespace", "namespace", ns.Name, "owner", owner, "internal", internal, "system", system, "shared", shared)

		if owner != "" {

			if label, ok := ns.Labels[security.NamespaceOwnerLabel]; !ok || label != owner {
				ns.Labels[security.NamespaceOwnerLabel] = owner
				switch {
				case internal:
					ns.Labels[security.NamespaceTypeLabel] = security.Internal
				}
				updated = true
			}
		} else {
			// remove owner label
			if _, ok := ns.Labels[security.NamespaceOwnerLabel]; ok {
				delete(ns.Labels, security.NamespaceOwnerLabel)
				updated = true
			}
		}
		// NOTE(review): updated is set here even when the label already has
		// this value, which can trigger a redundant Update — confirm intended.
		if system {
			ns.Labels[security.NamespaceTypeLabel] = security.System
			updated = true
		}

		// The shared label is fully managed: added when the namespace belongs
		// to a shared sub-chart, removed otherwise.
		if shared {
			if label, ok := ns.Labels[security.NamespaceSharedLabel]; !ok || label != "true" {
				ns.Labels[security.NamespaceSharedLabel] = "true"
				updated = true
			}
		} else {
			if _, ok := ns.Labels[security.NamespaceSharedLabel]; ok {
				delete(ns.Labels, security.NamespaceSharedLabel)
				updated = true
			}
		}
		// set middleware namespace label
		if isMiddleware {
			if label, ok := ns.Labels[security.NamespaceMiddlewareLabel]; !ok || label != "true" {
				ns.Labels[security.NamespaceMiddlewareLabel] = "true"
				updated = true
			}
		} else {
			if _, ok := ns.Labels[security.NamespaceMiddlewareLabel]; ok {
				delete(ns.Labels, security.NamespaceMiddlewareLabel)
				updated = true
			}
		}

	}
	if updated {
		logger.Info("Update labels of namespace")
		err := r.Update(ctx, ns)
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// createOrUpdateNetworkPolicy reconciles a single desired NetworkPolicy in
// its namespace: an existing policy with the same name has its spec replaced
// and is updated; any other policy in the namespace that is not part of
// namespaceNetworkPolicies is deleted; and the policy is created when no
// existing one matched. networkPolicyFix, when non-nil, is applied to the
// object right before Update/Create so per-namespace adjustments (owner
// labels, extra ingress rules) can be injected.
func (r *SecurityReconciler) createOrUpdateNetworkPolicy(ctx context.Context,
	networkPolicy *netv1.NetworkPolicy,
	networkPolicyFix func(np *netv1.NetworkPolicy),
	namespaceNetworkPolicies *security.NetworkPolicies,
) error {
	var nps netv1.NetworkPolicyList
	key := client.ObjectKey{
		Namespace: networkPolicy.Namespace,
		Name: networkPolicy.Name,
	}
	err := r.List(ctx, &nps, client.InNamespace(networkPolicy.Namespace))
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}

	found := false
	for _, np := range nps.Items {
		if np.Name == key.Name && np.Namespace == key.Namespace {
			// Same name: overwrite the spec with the desired one, keeping the
			// existing object metadata (resourceVersion etc.) for Update.
			np.Spec = *networkPolicy.Spec.DeepCopy()
			if networkPolicyFix != nil {
				networkPolicyFix(&np)
			}
			if err := r.Update(ctx, &np); err != nil {
				return err
			}
			found = true
		} else {
			// Prune stray policies that are not part of the desired set for
			// this namespace.
			if namespaceNetworkPolicies != nil && !namespaceNetworkPolicies.Contains(&np) {
				if err := r.Delete(ctx, &np); err != nil {
					return err
				}
			}
		}
	}

	if apierrors.IsNotFound(err) || !found {
		// No existing policy matched: create a fresh copy, again applying the
		// per-namespace fix first.
		np := *networkPolicy.DeepCopy()
		if networkPolicyFix != nil {
			networkPolicyFix(&np)
		}

		if err := r.Create(ctx, &np); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// reconcileNetworkPolicy enforces the network policies for ns. While the
// namespace is alive it adds a finalizer, selects the desired policy set by
// namespace kind (underlayer / os-system / os-protected / os-network /
// user-system / user-space / middleware / app / shared / deny-all), makes
// sure the namespace itself is an allowed ingress peer, and creates/updates
// the policies. On deletion it removes all policies in the namespace and
// clears the finalizer. Public namespaces are exempt from policies entirely.
// The else-if chain below is ordered by precedence; npFix is a per-kind hook
// applied to each policy just before it is written.
func (r *SecurityReconciler) reconcileNetworkPolicy(ctx context.Context, ns *corev1.Namespace) error {
	logger := ctx.Value(loggerKey).(logr.Logger)
	finalizer := "finalizers.bytetrade.io/namespaces"

	if security.IsPublicNamespace(ns.Name) {
		// public namespace should not have network policy
		return nil
	}

	if ns.ObjectMeta.DeletionTimestamp.IsZero() {
		// Guard namespace deletion so the policies can be cleaned up first.
		if !funk.Contains(ns.ObjectMeta.Finalizers, finalizer) {
			ns.ObjectMeta.Finalizers = append(ns.ObjectMeta.Finalizers, finalizer)
			if err := r.Update(ctx, ns); err != nil {
				return err
			}
		}

		var networkPolicy security.NetworkPolicies
		var npFix func(np *netv1.NetworkPolicy)
		if security.IsUnderLayerNamespace(ns.Name) {
			networkPolicy = security.NetworkPolicies{security.NPUnderLayerSystem.DeepCopy()}
			networkPolicy.SetName("underlayer-system-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = nil
		} else if security.IsOSSystemNamespace(ns.Name) {
			networkPolicy = security.NetworkPolicies{security.NPOSSystem.DeepCopy(), security.NSFilesPolicy.DeepCopy(), security.NPSystemProvider.DeepCopy(), security.NPSystemMiddleware.DeepCopy()}
			networkPolicy.SetName("os-system-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = nil
		} else if security.IsOSProtectedNamespace(ns.Name) {
			networkPolicy = security.NetworkPolicies{security.NPOSProtected.DeepCopy(), security.NPSystemProvider.DeepCopy()}
			networkPolicy.SetName("os-protected-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = nil
		} else if security.IsOSNetworkNamespace(ns.Name) {
			networkPolicy = security.NetworkPolicies{security.NPOSNetwork.DeepCopy()}
			networkPolicy.SetName("os-network-np")
			networkPolicy.SetNamespace(ns.Name)
			// Allow node tunnel addresses to reach the os-network namespace.
			npFix = func(np *netv1.NetworkPolicy) {
				np.Spec.Ingress = append(np.Spec.Ingress, netv1.NetworkPolicyIngressRule{
					From: security.NodeTunnelRule(),
				})
			}
		} else if security.IsUserSystemNamespaces(ns.Name) {
			networkPolicy = security.NetworkPolicies{security.NPUserSystem.DeepCopy()}
			networkPolicy.SetName("user-system-np")
			networkPolicy.SetNamespace(ns.Name)
			// Restrict ingress to namespaces owned by the same user.
			npFix = func(np *netv1.NetworkPolicy) {
				owner := ns.Labels[security.NamespaceOwnerLabel]
				logger.Info("update network policy", "name", networkPolicy.Name(), "owner", owner)
				np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[security.NamespaceOwnerLabel] = owner
			}
		} else if security.IsUserSpaceNamespaces(ns.Name) {
			networkPolicy = security.NetworkPolicies{security.NPUserSpace.DeepCopy(), security.NPIngress.DeepCopy()}
			networkPolicy.SetName("user-space-np")
			networkPolicy.SetNamespace(ns.Name)
			// Same-owner restriction plus node tunnel access.
			npFix = func(np *netv1.NetworkPolicy) {
				owner := ns.Labels[security.NamespaceOwnerLabel]
				logger.Info("update network policy", "name", networkPolicy.Name(), "owner", owner)
				np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[security.NamespaceOwnerLabel] = owner
				np.Spec.Ingress = append(np.Spec.Ingress, netv1.NetworkPolicyIngressRule{
					From: security.NodeTunnelRule(),
				})
			}
		} else if isMiddleware, ok := ns.Labels[security.NamespaceMiddlewareLabel]; ok && isMiddleware == "true" {
			networkPolicy = security.NetworkPolicies{security.NPAllowAll.DeepCopy()}
			networkPolicy.SetName("middleware-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = func(np *netv1.NetworkPolicy) {
				logger.Info("Update network policy", "name", networkPolicy.Name())
			}
		} else if owner, ok := ns.Labels[security.NamespaceOwnerLabel]; ok && owner != "" {
			// app namespace networkpolicy
			networkPolicy = security.NetworkPolicies{security.NPAppSpace.DeepCopy()}
			networkPolicy.SetName("app-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = func(np *netv1.NetworkPolicy) {
				logger.Info("Update network policy", "name", networkPolicy.Name(), "owner", owner)
				// Rewrite every owner-label selector in the template to this
				// namespace's owner.
				for i := range np.Spec.Ingress[0].From {
					if np.Spec.Ingress[0].From[i].NamespaceSelector != nil &&
						np.Spec.Ingress[0].From[i].NamespaceSelector.MatchLabels != nil {

						if _, ok := np.Spec.Ingress[0].From[i].NamespaceSelector.MatchLabels[security.NamespaceOwnerLabel]; ok {
							np.Spec.Ingress[0].From[i].NamespaceSelector.MatchLabels[security.NamespaceOwnerLabel] = owner
						}
					}
				}

				// get app name from np namespace
				depApp, err := r.getAppInNs(np.Namespace, owner)
				if err != nil {
					logger.Info("get app info ", "name", np.Namespace, "err", err, "ignore to add app ref", owner)
				} else if depApp != nil {
					// Allow the cluster apps/groups this app declares as
					// references to reach it.
					if appRefs, ok := depApp.Spec.Settings["clusterAppRef"]; ok {

						for _, app := range strings.Split(appRefs, ",") {
							if strings.HasSuffix(app, ".*") {
								// it's a app group
								group := strings.TrimSuffix(app, ".*")
								np.Spec.Ingress[0].From = append(np.Spec.Ingress[0].From, netv1.NetworkPolicyPeer{
									NamespaceSelector: &metav1.LabelSelector{
										MatchLabels: map[string]string{
											constants.ApplicationGroupClusterDep: group,
										},
									},
								})

								continue
							}

							np.Spec.Ingress[0].From = append(np.Spec.Ingress[0].From, netv1.NetworkPolicyPeer{
								NamespaceSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										constants.ApplicationClusterDep: app,
									},
								},
							})
						}
					}
				}

			}
		} else if shared, ok := ns.Labels[security.NamespaceSharedLabel]; ok && shared != "false" {
			// shared namespace networkpolicy
			networkPolicy = security.NetworkPolicies{security.NPSharedSpace.DeepCopy(), security.NPSystemProvider.DeepCopy(), security.NPSharedEntrance.DeepCopy()}
			networkPolicy.SetName("shared-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = func(np *netv1.NetworkPolicy) {
				logger.Info("Update network policy", "name", networkPolicy.Name())
				// get app name from np namespace
				sharedRefAppName := ns.Labels[constants.ApplicationNameLabel]
				if sharedRefAppName == "" {
					logger.Info("No application name label found in shared namespace, skip adding app ref")
					return
				}

				// NOTE(review): `owner` below comes from the previous else-if
				// branch's init statement (in scope through the chain) and may
				// be empty here — confirm intended.
				depAppMgr, err := r.tryToFindDependencyAppMgrOfSharedNamespace(ctx, ns, sharedRefAppName)
				if err != nil {
					logger.Info("get app mgr info ", "name", sharedRefAppName, "err", err, "ignore to add app ref", owner)
				} else if depAppMgr != nil {
					//add app himself to the network policy by default
					np.Spec.Ingress[0].From = append(np.Spec.Ingress[0].From, netv1.NetworkPolicyPeer{
						NamespaceSelector: &metav1.LabelSelector{
							MatchLabels: map[string]string{
								constants.ApplicationClusterDep: sharedRefAppName,
							},
						},
					})

					var appConfig appcfg.ApplicationConfig
					if err := depAppMgr.GetAppConfig(&appConfig); err != nil {
						logger.Error(err, "Failed to get app config for shared app", "app", sharedRefAppName)
						return
					}

					// Every referenced app (or app group, suffixed ".*") gets
					// ingress access to the shared namespace.
					for _, app := range appConfig.AppScope.AppRef {
						if app == sharedRefAppName {
							continue
						}

						if strings.HasSuffix(app, ".*") {
							// it's a app group
							group := strings.TrimSuffix(app, ".*")
							np.Spec.Ingress[0].From = append(np.Spec.Ingress[0].From, netv1.NetworkPolicyPeer{
								NamespaceSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										constants.ApplicationGroupClusterDep: group,
									},
								},
							})

							continue
						}

						np.Spec.Ingress[0].From = append(np.Spec.Ingress[0].From, netv1.NetworkPolicyPeer{
							NamespaceSelector: &metav1.LabelSelector{
								MatchLabels: map[string]string{
									constants.ApplicationClusterDep: app,
								},
							},
						})
					}
				}

			} // end of func npFix

		} else {
			// Unclassified namespaces are locked down entirely.
			networkPolicy = security.NetworkPolicies{security.NPDenyAll.DeepCopy()}
			networkPolicy.SetName("others-np")
			networkPolicy.SetNamespace(ns.Name)
			npFix = func(np *netv1.NetworkPolicy) {
				logger.Info("Update network policy", "name", networkPolicy.Name())
			}
		}

		// add the namespace itself to the policy

		if networkPolicy.Main().Spec.Ingress == nil {
			networkPolicy.Main().Spec.Ingress = []netv1.NetworkPolicyIngressRule{}
		}

		if len(networkPolicy.Main().Spec.Ingress) == 0 {
			networkPolicy.Main().Spec.Ingress = append(networkPolicy.Main().Spec.Ingress, netv1.NetworkPolicyIngressRule{
				From: []netv1.NetworkPolicyPeer{},
			})
		}

		// Self-access peer is skipped for middleware (allow-all already) and
		// when the template already selects this namespace by name.
		if r.namespaceMustAdd(networkPolicy.Main(), ns) && networkPolicy.Name() != "middleware-np" {
			networkPolicy.Main().Spec.Ingress[0].From = append(networkPolicy.Main().Spec.Ingress[0].From, netv1.NetworkPolicyPeer{
				NamespaceSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"kubernetes.io/metadata.name": ns.Name,
					},
				},
			},
			)
		}

		if err := r.createOrUpdateNetworkPolicy(
			ctx,
			networkPolicy.Main(),
			npFix,
			&networkPolicy,
		); err != nil {
			return err
		}

		// Additional policies are written verbatim (no npFix).
		for _, np := range networkPolicy.Additional() {
			if err := r.createOrUpdateNetworkPolicy(
				ctx,
				np,
				nil,
				&networkPolicy,
			); err != nil {
				return err
			}
		}

	} else {
		// delete network policy
		var networkPolicies netv1.NetworkPolicyList
		err := r.List(ctx, &networkPolicies, client.InNamespace(ns.Name))
		if err != nil {
			return err
		}

		for _, n := range networkPolicies.Items {
			if err := r.Delete(ctx, &n); err != nil {
				return err
			}
		}

		// remove finalizer
		ns.ObjectMeta.Finalizers = funk.FilterString(ns.ObjectMeta.Finalizers,
			func(item string) bool {
				return item != finalizer
			},
		)
		if err := r.Update(ctx, ns); err != nil {
			return err
		}

	}
	return nil
}
|
||||
|
||||
func (r *SecurityReconciler) findOwnerOfNamespace(ctx context.Context, ns *corev1.Namespace) (owner string, internal, system, shared, isMiddleware bool, err error) {
|
||||
appIsInternal := func(labels map[string]string, owner string) (internal, system, shared, isMiddleware bool, err error) {
|
||||
appName, ok := labels[constants.ApplicationNameLabel]
|
||||
if ok && appName != "" {
|
||||
appNamespace := fmt.Sprintf("%s-%s", appName, owner)
|
||||
//mgr, err := r.getAppMgrInNs(appNamespace, owner)
|
||||
mgr, err := r.getAppMgrByAppNameAndOwner(appName, owner)
|
||||
if err != nil {
|
||||
r.Logger.Error(err, "Failed to get app mgr in namespace", "namespace", ns.Name, "owner", owner)
|
||||
return false, false, false, false, err
|
||||
}
|
||||
|
||||
if mgr != nil {
|
||||
var cfg appcfg.ApplicationConfig
|
||||
err = mgr.GetAppConfig(&cfg)
|
||||
if err != nil {
|
||||
r.Logger.Error(err, "Failed to get app config for app", "app", appName)
|
||||
return false, false, false, false, err
|
||||
}
|
||||
if cfg.IsMiddleware() {
|
||||
isMiddleware = true
|
||||
}
|
||||
|
||||
system = cfg.AppScope.ClusterScoped && cfg.AppScope.SystemService
|
||||
shared := false
|
||||
for _, chart := range cfg.SubCharts {
|
||||
if chart.Namespace(owner) == ns.Name {
|
||||
if cfg.APIVersion == appcfg.V2 {
|
||||
if !chart.Shared {
|
||||
// V2: if the namespace is not cluster scoped, it cannot be considered as system app
|
||||
system = false
|
||||
} else {
|
||||
shared = true
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return cfg.Internal, system, shared, isMiddleware, nil
|
||||
} // end of mgr != nil
|
||||
|
||||
klog.Infof("App manager not found in namespace %s for owner %s", appNamespace, owner)
|
||||
}
|
||||
|
||||
return false, false, false, false, nil
|
||||
}
|
||||
|
||||
// get deployments installed by app installer
|
||||
var deployemnts appsv1.DeploymentList
|
||||
|
||||
if err := r.List(ctx, &deployemnts, client.InNamespace(ns.Name)); err == nil {
|
||||
for _, d := range deployemnts.Items {
|
||||
if d.GetLabels() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
owner, ok := d.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
if ok && owner != "" {
|
||||
runAsInternal, system, shared, isMiddleware, err := appIsInternal(d.GetLabels(), owner)
|
||||
if err != nil {
|
||||
return "", false, false, false, false, err
|
||||
}
|
||||
return owner, runAsInternal, system, shared, isMiddleware, nil
|
||||
}
|
||||
} // end loop deployment.Items
|
||||
}
|
||||
|
||||
// try to get statefulset
|
||||
var statefulSets appsv1.StatefulSetList
|
||||
if err := r.List(ctx, &statefulSets, client.InNamespace(ns.Name)); err == nil {
|
||||
for _, d := range statefulSets.Items {
|
||||
if d.GetLabels() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
owner, ok := d.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
if ok && owner != "" {
|
||||
runAsInternal, system, shared, isMiddleware, err := appIsInternal(d.GetLabels(), owner)
|
||||
if err != nil {
|
||||
return "", false, false, false, false, err
|
||||
}
|
||||
return owner, runAsInternal, system, shared, isMiddleware, nil
|
||||
}
|
||||
} // end loop sts.Items
|
||||
}
|
||||
|
||||
// try to get argo workflow
|
||||
gvr := schema.GroupVersionResource{
|
||||
Group: "argoproj.io",
|
||||
Version: "v1alpha1",
|
||||
Resource: "cronworkflows",
|
||||
}
|
||||
|
||||
if workflows, err := r.DynamicClient.Resource(gvr).Namespace(ns.Name).List(ctx, metav1.ListOptions{}); err == nil {
|
||||
for _, w := range workflows.Items {
|
||||
if w.GetLabels() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
owner, ok := w.GetLabels()[constants.WorkflowOwnerLabel]
|
||||
if ok && owner != "" {
|
||||
runAsInternal, system, shared, _, err := appIsInternal(w.GetLabels(), owner)
|
||||
if err != nil {
|
||||
return "", false, false, false, false, err
|
||||
}
|
||||
return owner, runAsInternal, system, shared, isMiddleware, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
gvr = schema.GroupVersionResource{
|
||||
Group: "apps.kubeblocks.io",
|
||||
Version: "v1",
|
||||
Resource: "clusters",
|
||||
}
|
||||
if middlewares, err := r.DynamicClient.Resource(gvr).Namespace(ns.Name).List(ctx, metav1.ListOptions{}); err == nil {
|
||||
for _, w := range middlewares.Items {
|
||||
if w.GetLabels() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
owner, ok := w.GetLabels()[constants.ApplicationOwnerLabel]
|
||||
if ok && owner != "" {
|
||||
runAsInternal, system, shared, isMiddleware, err := appIsInternal(w.GetLabels(), owner)
|
||||
if err != nil {
|
||||
return "", false, false, false, false, err
|
||||
}
|
||||
return owner, runAsInternal, system, shared, isMiddleware, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
klog.Infof("No owner found in workload for namespace %s", ns.Name)
|
||||
if appName, ok := ns.Labels[constants.ApplicationNameLabel]; ok && appName != "" {
|
||||
// if the namespace is labeled with application name,
|
||||
// find the application manager from the one of user
|
||||
var appMgrs v1alpha1.ApplicationManagerList
|
||||
if err := r.List(ctx, &appMgrs); err == nil {
|
||||
for _, appMgr := range appMgrs.Items {
|
||||
if appMgr.Spec.AppName == appName {
|
||||
owner := appMgr.Spec.AppOwner
|
||||
runAsInternal, system, shared, isMiddleware, err := appIsInternal(ns.Labels, owner)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get app manager %s in namespace %s: %v", appMgr.Name, ns.Name, err)
|
||||
return "", false, false, false, false, err
|
||||
}
|
||||
|
||||
// should not return the owner, it should be the shared namespace
|
||||
return "", runAsInternal, system, shared, isMiddleware, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
klog.Infof("No owner found in namespace %s", ns.Name)
|
||||
return "", false, false, false, isMiddleware, nil
|
||||
}
|
||||
|
||||
func (r *SecurityReconciler) tryToFindDependencyAppMgrOfSharedNamespace(ctx context.Context, ns *corev1.Namespace, sharedRefAppName string) (*v1alpha1.ApplicationManager, error) {
|
||||
// try to find the dependency app in the namespace
|
||||
owner := ns.Labels[constants.ApplicationInstallUserLabel]
|
||||
|
||||
namespace := fmt.Sprintf("%s-%s", sharedRefAppName, owner)
|
||||
depApp, err := r.getAppMgrInNs(namespace, owner)
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
klog.Info("dependency app not found in install user's app , try to find in other admin user, ", sharedRefAppName)
|
||||
var appMgrs v1alpha1.ApplicationManagerList
|
||||
if err := r.List(ctx, &appMgrs); err == nil {
|
||||
for _, appMgr := range appMgrs.Items {
|
||||
if appMgr.Spec.AppName == sharedRefAppName && appMgr.Spec.AppOwner != owner {
|
||||
return depApp, nil
|
||||
}
|
||||
} // end of loop appMgrs.Items
|
||||
} else {
|
||||
klog.Error(err, "Failed to list application managers")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} // end of if !apierrors.IsNotFound(err)
|
||||
|
||||
klog.Error("failed to get dependency app manager in namespace, ", namespace, " err: ", err)
|
||||
} // end of if err != nil
|
||||
|
||||
return depApp, nil
|
||||
}
|
||||
|
||||
func (r *SecurityReconciler) namespaceMustAdd(networkPolicy *netv1.NetworkPolicy, ns *corev1.Namespace) bool {
|
||||
for _, i := range networkPolicy.Spec.Ingress {
|
||||
for _, f := range i.From {
|
||||
if f.NamespaceSelector != nil && f.NamespaceSelector.MatchLabels != nil {
|
||||
if v, ok := f.NamespaceSelector.MatchLabels["kubernetes.io/metadata.name"]; ok && v == ns.Name {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *SecurityReconciler) namespacesShouldAllowNodeTunnel(ctx context.Context) ([]reconcile.Request, error) {
|
||||
schemeGroupVersionResource := schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "users"}
|
||||
users, err := r.DynamicClient.Resource(schemeGroupVersionResource).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
r.Logger.Error(err, "Failed to list user")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reqs := []reconcile.Request{
|
||||
{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: "os-network",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, u := range users.Items {
|
||||
reqs = append(reqs, reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Name: "user-space-" + u.GetName(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return reqs, nil
|
||||
}
|
||||
|
||||
func (r *SecurityReconciler) getAppInNs(ns, owner string) (*v1alpha1.Application, error) {
|
||||
appName := getAppNameFromNPName(ns, owner)
|
||||
|
||||
if len(appName) > 0 {
|
||||
appName = fmt.Sprintf("%s-%s", ns, appName)
|
||||
key := types.NamespacedName{Name: appName}
|
||||
var depApp v1alpha1.Application
|
||||
err := r.Get(context.Background(), key, &depApp)
|
||||
if err != nil {
|
||||
r.Logger.Info("Get app info ", "name", appName, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &depApp, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *SecurityReconciler) getAppMgrInNs(ns, owner string) (*v1alpha1.ApplicationManager, error) {
|
||||
appName := getAppNameFromNPName(ns, owner)
|
||||
|
||||
if len(appName) > 0 {
|
||||
appName = fmt.Sprintf("%s-%s", ns, appName)
|
||||
key := types.NamespacedName{Name: appName}
|
||||
var depAppMgr v1alpha1.ApplicationManager
|
||||
err := r.Get(context.Background(), key, &depAppMgr)
|
||||
if err != nil {
|
||||
r.Logger.Info("Get app manager ", "name", appName, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &depAppMgr, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *SecurityReconciler) getAppMgrByAppNameAndOwner(appName, owner string) (*v1alpha1.ApplicationManager, error) {
|
||||
var amList v1alpha1.ApplicationManagerList
|
||||
err := r.List(context.TODO(), &amList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, am := range amList.Items {
|
||||
if am.Spec.AppName == appName && am.Spec.AppOwner == owner {
|
||||
return &am, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func isNodeChanged(obj ...metav1.Object) bool {
|
||||
o := obj[0]
|
||||
// network policy should be reconciled when nodes are changed
|
||||
if _, ok := o.(*corev1.Node); ok {
|
||||
if len(obj) > 1 {
|
||||
o1 := obj[0].(*corev1.Node)
|
||||
o2 := obj[1].(*corev1.Node)
|
||||
|
||||
return o1.Annotations[utils.CalicoTunnelAddrAnnotation] != o2.Annotations[utils.CalicoTunnelAddrAnnotation]
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getAppNameFromNPName extracts the app name from a namespace that follows
// the "<app>-<owner>" convention. The reserved "user-space*" and
// "user-system*" namespaces, and namespaces not ending in "-<owner>", yield
// the empty string.
func getAppNameFromNPName(ns string, owner string) string {
	if strings.HasPrefix(ns, "user-space") || strings.HasPrefix(ns, "user-system") {
		return ""
	}

	suffix := "-" + owner
	if !strings.HasSuffix(ns, suffix) {
		return ""
	}

	return strings.TrimSuffix(ns, suffix)
}
|
||||
12
framework/app-service/controllers/suite_test.go
Normal file
12
framework/app-service/controllers/suite_test.go
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestControllers is the `go test` entry point for the app manager
// controllers suite: it wires ginkgo's failure handler into gomega and runs
// the registered specs.
func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "app manager controllers suite")
}
|
||||
110
framework/app-service/controllers/systemenv_controller.go
Normal file
110
framework/app-service/controllers/systemenv_controller.go
Normal file
|
|
@ -0,0 +1,110 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// SystemEnvController reconciles SystemEnv resources and propagates changes
// to the AppEnv resources that reference them (by annotating those AppEnvs
// for re-sync).
type SystemEnvController struct {
	client.Client
	// Scheme is the runtime scheme used by the controller manager.
	Scheme *runtime.Scheme
}
|
||||
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=systemenvs,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=systemenvs/status,verbs=get;update;patch
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=appenvs,verbs=get;list;watch;update;patch
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=appenvs/status,verbs=get;update;patch
|
||||
|
||||
// SetupWithManager registers this controller with the manager, watching
// SystemEnv resources.
func (r *SystemEnvController) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&sysv1alpha1.SystemEnv{}).
		Complete(r)
}
|
||||
|
||||
func (r *SystemEnvController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
klog.Infof("Reconciling SystemEnv: %s", req.NamespacedName)
|
||||
|
||||
var systemEnv sysv1alpha1.SystemEnv
|
||||
if err := r.Get(ctx, req.NamespacedName, &systemEnv); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
return r.reconcileSystemEnv(ctx, &systemEnv)
|
||||
}
|
||||
|
||||
// reconcileSystemEnv propagates a SystemEnv change: it walks all AppEnvs,
// and every AppEnv that references the changed environment variable is
// annotated so the AppEnv controller re-syncs it. An error is returned (for
// requeue) when any annotation patch failed.
func (r *SystemEnvController) reconcileSystemEnv(ctx context.Context, systemEnv *sysv1alpha1.SystemEnv) (ctrl.Result, error) {
	klog.Infof("Processing SystemEnv change: %s", systemEnv.EnvName)

	var appEnvList sysv1alpha1.AppEnvList
	if err := r.List(ctx, &appEnvList); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to list AppEnvs: %v", err)
	}

	// refCount: AppEnvs referencing this env; annotatedCount: annotation
	// attempts; failedCount: attempts whose patch returned an error.
	refCount := 0
	annotatedCount := 0
	failedCount := 0

	for i := range appEnvList.Items {
		appEnv := &appEnvList.Items[i]
		if r.isReferenced(appEnv, systemEnv.EnvName) {
			refCount++

			annotated, err := r.annotateAppEnvForSync(ctx, appEnv, systemEnv)
			// NOTE(review): annotated is counted before the error check, so a
			// failed patch increments both annotatedCount and failedCount —
			// confirm this double-counting is intended.
			if annotated {
				annotatedCount++
			}
			if err != nil {
				klog.Errorf("Failed to annotate AppEnv %s/%s for sync: %v", appEnv.Namespace, appEnv.Name, err)
				failedCount++
				continue
			}
		}
	}

	if refCount > 0 {
		klog.Infof("SystemEnv %s reconciliation completed: %d total references, %d annotated for sync, %d failed",
			systemEnv.EnvName, refCount, annotatedCount, failedCount)
	}

	// Returning an error makes controller-runtime requeue so failed
	// annotations are retried.
	if failedCount > 0 {
		return ctrl.Result{}, fmt.Errorf("failed to annotate %d AppEnvs referencing environment variable %s", failedCount, systemEnv.EnvName)
	}

	return ctrl.Result{}, nil
}
|
||||
|
||||
func (r *SystemEnvController) isReferenced(appEnv *sysv1alpha1.AppEnv, systemEnvName string) bool {
|
||||
for _, envVar := range appEnv.Envs {
|
||||
if envVar.ValueFrom != nil && envVar.ValueFrom.EnvName == systemEnvName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// annotateAppEnvForSync marks appEnv with the sync annotation (set to the
// changed SystemEnv's name) so the AppEnv controller picks it up for re-sync.
// It returns whether an annotation was attempted, and the Patch error if any.
// An AppEnv that already carries a non-empty sync annotation is skipped.
func (r *SystemEnvController) annotateAppEnvForSync(ctx context.Context, appEnv *sysv1alpha1.AppEnv, systemEnv *sysv1alpha1.SystemEnv) (bool, error) {
	// Check if annotation already exists
	if appEnv.Annotations != nil && appEnv.Annotations[constants.AppEnvSyncAnnotation] != "" {
		klog.V(4).Infof("AppEnv %s/%s already has sync annotation, skipping", appEnv.Namespace, appEnv.Name)
		return false, nil
	}

	// Add annotation to trigger AppEnvController sync; keep a deep copy of
	// the original so the change can be applied as a merge patch.
	original := appEnv.DeepCopy()
	if appEnv.Annotations == nil {
		appEnv.Annotations = make(map[string]string)
	}
	appEnv.Annotations[constants.AppEnvSyncAnnotation] = systemEnv.EnvName

	klog.Infof("Annotating AppEnv %s/%s for sync due to environment variable %s change",
		appEnv.Namespace, appEnv.Name, systemEnv.EnvName)

	return true, r.Patch(ctx, appEnv, client.MergeFrom(original))
}
|
||||
|
|
@ -0,0 +1,168 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// SystemEnvProcessEnvController only handles syncing SystemEnv values into the
// current process environment, supporting legacy aliases for compatibility.
type SystemEnvProcessEnvController struct {
	client.Client
	// Scheme is the runtime scheme supplied by the manager.
	Scheme *runtime.Scheme
}

// legacyEnvAliases maintains backward-compatible aliases for system environment variables
// during the migration period. Keys are new env names, values are a single legacy name
// that should mirror the same value in the process environment.
var legacyEnvAliases = map[string]string{
	"OLARES_SYSTEM_ROOT_PATH":    "OLARES_ROOT_DIR",
	"OLARES_SYSTEM_ROOTFS_TYPE":  "OLARES_FS_TYPE",
	"OLARES_SYSTEM_CUDA_VERSION": "CUDA_VERSION",
}

// migrationAnnotationKey marks a SystemEnv whose Default has already been
// migrated (from a legacy env alias or a domain-derived default), so the
// one-time migration in InitializeSystemEnvProcessEnv skips it.
const migrationAnnotationKey = "sys.bytetrade.io/systemenv-migrated"
|
||||
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=systemenvs,verbs=get;list;watch
|
||||
|
||||
func (r *SystemEnvProcessEnvController) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
Named("systemenv-processenv").
|
||||
For(&sysv1alpha1.SystemEnv{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *SystemEnvProcessEnvController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
klog.Infof("Reconciling SystemEnv for process env: %s", req.NamespacedName)
|
||||
|
||||
var systemEnv sysv1alpha1.SystemEnv
|
||||
if err := r.Get(ctx, req.NamespacedName, &systemEnv); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
value := systemEnv.GetEffectiveValue()
|
||||
if err := setEnvAndAlias(systemEnv.EnvName, value); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// setEnvAndAlias sets the given env name and all of its legacy aliases
|
||||
// in the current process environment. Returns an error if any setenv fails.
|
||||
func setEnvAndAlias(envName, value string) error {
|
||||
if value == "" {
|
||||
klog.V(4).Infof("Skip setting env %s: empty effective value", envName)
|
||||
return nil
|
||||
}
|
||||
if err := os.Setenv(envName, value); err != nil {
|
||||
return fmt.Errorf("setenv %s failed: %w", envName, err)
|
||||
}
|
||||
klog.V(4).Infof("Set env %s", envName)
|
||||
if alias, ok := legacyEnvAliases[envName]; ok && alias != "" {
|
||||
if err := os.Setenv(alias, value); err != nil {
|
||||
return fmt.Errorf("setenv legacy alias %s for %s failed: %w", alias, envName, err)
|
||||
}
|
||||
klog.V(4).Infof("Set legacy env %s (alias of %s)", alias, envName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InitializeSystemEnvProcessEnv performs a one-time, startup-time pass over
// all SystemEnvs: for entries not yet marked migrated it (a) adopts a value
// found under a legacy process-env alias as the new Default, or (b) derives
// region-aware service-URL defaults from the Terminus domain name, patching
// the SystemEnv and stamping the migration annotation. It then seeds the
// process environment for those entries. Individual failures are collected
// and returned as an aggregate so one bad entry does not block the rest.
//
// NOTE(review): setEnvAndAlias runs only inside the !migrated branch, so
// already-migrated SystemEnvs are never exported here — presumably the
// Reconcile loop seeds those; confirm this is intended.
func InitializeSystemEnvProcessEnv(ctx context.Context, c client.Client) error {
	var list sysv1alpha1.SystemEnvList
	if err := c.List(ctx, &list); err != nil {
		return fmt.Errorf("failed to list SystemEnvs: %v", err)
	}

	var errs []error
	var domainName string
	// once guards the single Terminus lookup shared by all iterations.
	var once sync.Once
	for i := range list.Items {
		se := &list.Items[i]

		migrated := se.Annotations != nil && se.Annotations[migrationAnnotationKey] == "true"
		if !migrated {
			if alias, ok := legacyEnvAliases[se.EnvName]; ok && alias != "" {
				// Case (a): the SystemEnv has a legacy alias — adopt a
				// valid, non-empty legacy process-env value as the Default.
				if legacyVal, ok := os.LookupEnv(alias); ok && legacyVal != "" {
					if err := se.ValidateValue(legacyVal); err != nil {
						klog.Warningf("Skip migrating SystemEnv %s: legacy alias %s value invalid for type %s: %v", se.EnvName, alias, se.Type, err)
					} else if se.Default != legacyVal {
						original := se.DeepCopy()
						se.Default = legacyVal
						if se.Annotations == nil {
							se.Annotations = make(map[string]string)
						}
						se.Annotations[migrationAnnotationKey] = "true"
						if err := c.Patch(ctx, se, client.MergeFrom(original)); err != nil {
							errs = append(errs, fmt.Errorf("patch SystemEnv %s default from legacy alias failed: %w", se.EnvName, err))
						}
					}
				}
			} else {
				// Case (b): no legacy alias — derive service-URL defaults
				// (.cn domains get the China mirrors) from the Terminus CR.
				var err error
				once.Do(func() {
					sysCR := &sysv1alpha1.Terminus{}
					err = c.Get(ctx, client.ObjectKey{Name: "terminus"}, sysCR)
					if err != nil {
						klog.Errorf("get terminus failed: %v", err)
						return
					}
					domainName = sysCR.Spec.Settings["domainName"]
				})
				// err is only non-nil on the iteration that ran once.Do;
				// a failed Terminus lookup aborts the whole initialization.
				if err != nil {
					return fmt.Errorf("get terminus failed: %w", err)
				}

				var isCNDomain bool
				if strings.HasSuffix(domainName, ".cn") {
					isCNDomain = true
				}
				var newDefaultVal string
				switch se.EnvName {
				case "OLARES_SYSTEM_DOCKERHUB_SERVICE":
					newDefaultVal = "https://mirrors.olares.com"
					if isCNDomain {
						newDefaultVal = "https://mirrors.olares.cn"
					}
				case "OLARES_SYSTEM_REMOTE_SERVICE":
					newDefaultVal = "https://api.olares.com"
					if isCNDomain {
						newDefaultVal = "https://api.olares.cn"
					}
				case "OLARES_SYSTEM_CDN_SERVICE":
					newDefaultVal = "https://cdn.olares.com"
					if isCNDomain {
						newDefaultVal = "https://cdn.olares.cn"
					}

				}
				// Only patch when a new default applies and differs; other
				// env names fall through with newDefaultVal == "".
				if newDefaultVal != "" && se.Default != newDefaultVal {
					original := se.DeepCopy()
					se.Default = newDefaultVal
					if se.Annotations == nil {
						se.Annotations = make(map[string]string)
					}
					se.Annotations[migrationAnnotationKey] = "true"
					if err := c.Patch(ctx, se, client.MergeFrom(original)); err != nil {
						errs = append(errs, fmt.Errorf("patch SystemEnv %s default failed: %w", se.EnvName, err))
					}
				}
			}

			// Seed the process env (and legacy alias) with the effective value.
			if err := setEnvAndAlias(se.EnvName, se.GetEffectiveValue()); err != nil {
				errs = append(errs, fmt.Errorf("set process env for %s failed: %w", se.EnvName, err))
			}
		}
	}
	return utilerrors.NewAggregate(errs)
}
|
||||
305
framework/app-service/controllers/tailscale_acl_controller.go
Normal file
305
framework/app-service/controllers/tailscale_acl_controller.go
Normal file
|
|
@ -0,0 +1,305 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
const (
	// tailScaleACLPolicyMd5Key is the annotation key carrying the md5 of the
	// rendered ACL policy, used to detect changes on the ConfigMap and to
	// trigger a headscale rolling update via the pod template.
	tailScaleACLPolicyMd5Key = "tailscale-acl-md5"
	// tailScaleDeployOrContainerName is both the Deployment name and the
	// container name of the per-user tailscale instance.
	tailScaleDeployOrContainerName = "tailscale"
	// subnetRoutesEnv is the tailscale container env var holding the
	// comma-separated advertised subnet routes.
	subnetRoutesEnv = "TS_ROUTES"
)
|
||||
|
||||
// defaultACLs are appended to every generated policy: HTTPS (443),
// port 18088, DNS (53) and HTTP (80) are accepted from any source.
var defaultACLs = []v1alpha1.ACL{
	{
		Action: "accept",
		Src:    []string{"*"},
		Proto:  "tcp",
		Dst:    []string{"*:443"},
	},
	{
		Action: "accept",
		Src:    []string{"*"},
		Proto:  "tcp",
		Dst:    []string{"*:18088"},
	},
	{
		Action: "accept",
		Src:    []string{"*"},
		Proto:  "",
		Dst:    []string{"*:53"},
	},
	{
		Action: "accept",
		Src:    []string{"*"},
		Proto:  "",
		Dst:    []string{"*:80"},
	},
}

// defaultSubRoutes is always advertised; the $(COREDNS_SVC) placeholder is
// presumably substituted with the CoreDNS service IP elsewhere — confirm.
var defaultSubRoutes = []string{"$(COREDNS_SVC)/32"}
|
||||
|
||||
// ACLPolicy is the JSON document written to the tailscale-acl ConfigMap
// (key "acl.json") and consumed by headscale.
type ACLPolicy struct {
	ACLs          []v1alpha1.ACL `json:"acls"`
	AutoApprovers AutoApprovers  `json:"autoApprovers"`
}

// AutoApprovers lists subnet routes and exit nodes that are approved
// automatically, without manual confirmation.
type AutoApprovers struct {
	Routes   map[string][]string `json:"routes"`
	ExitNode []string            `json:"exitNode"`
}

// TailScaleACLController regenerates a user's tailscale ACL policy and
// advertised subnet routes from that user's Applications.
type TailScaleACLController struct {
	client.Client
}
|
||||
|
||||
func (r *TailScaleACLController) SetUpWithManager(mgr ctrl.Manager) error {
|
||||
c, err := controller.New("app's tailscale acls manager controller", mgr, controller.Options{
|
||||
Reconciler: r,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.Watch(source.Kind(
|
||||
mgr.GetCache(),
|
||||
&v1alpha1.Application{},
|
||||
handler.TypedEnqueueRequestsFromMapFunc(
|
||||
func(ctx context.Context, app *v1alpha1.Application) []reconcile.Request {
|
||||
return []reconcile.Request{{NamespacedName: types.NamespacedName{
|
||||
Name: app.Name,
|
||||
Namespace: app.Spec.Owner,
|
||||
}}}
|
||||
}),
|
||||
predicate.TypedFuncs[*v1alpha1.Application]{
|
||||
CreateFunc: func(e event.TypedCreateEvent[*v1alpha1.Application]) bool {
|
||||
return true
|
||||
},
|
||||
UpdateFunc: func(e event.TypedUpdateEvent[*v1alpha1.Application]) bool {
|
||||
return true
|
||||
},
|
||||
DeleteFunc: func(e event.TypedDeleteEvent[*v1alpha1.Application]) bool {
|
||||
return true
|
||||
},
|
||||
},
|
||||
))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconcile rebuilds the owner's tailscale subnet routes (TS_ROUTES env on
// the tailscale Deployment) and ACL policy (tailscale-acl ConfigMap), and
// bumps an md5 annotation on the headscale Deployment's pod template to
// force a rolling restart when the policy changed.
func (r *TailScaleACLController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = log.FromContext(ctx)
	klog.Infof("reconcile tailscale acls subroutes request name=%v, owner=%v", req.Name, req.Namespace)
	owner := req.Namespace

	// for this request req.Namespace is owner
	// list all apps by owner and generate acls by owner
	var apps v1alpha1.ApplicationList
	err := r.List(ctx, &apps)
	if err != nil {
		klog.Errorf("list applications failed: %v", err)
		return ctrl.Result{}, err
	}
	filteredApps := make([]v1alpha1.Application, 0)
	for _, app := range apps.Items {
		if app.Spec.Owner != owner {
			continue
		}
		filteredApps = append(filteredApps, app)
	}

	// Newest-created apps first, for a deterministic rule order.
	sort.Slice(filteredApps, func(i, j int) bool {
		return filteredApps[j].CreationTimestamp.Before(&filteredApps[i].CreationTimestamp)
	})

	tailScaleACLConfig := "tailscale-acl"
	headScaleNamespace := fmt.Sprintf("user-space-%s", owner)

	// calculate acls
	acls := make([]v1alpha1.ACL, 0)
	subRoutes := make([]string, 0)
	routeSet := sets.NewString() // deduplicates app-declared sub routes

	subRoutes = append(subRoutes, defaultSubRoutes...)
	for _, app := range filteredApps {
		acls = append(acls, app.Spec.TailScale.ACLs...)
		// just to maintain compatibility with existing application
		acls = append(acls, app.Spec.TailScaleACLs...)
		for _, subRoute := range app.Spec.TailScale.SubRoutes {
			if routeSet.Has(subRoute) {
				continue
			}
			subRoutes = append(subRoutes, subRoute)
			routeSet.Insert(subRoute)
		}
	}

	// Read the currently advertised routes from the tailscale container env.
	tailScaleDeploy := &appsv1.Deployment{}
	err = r.Get(ctx, types.NamespacedName{Name: tailScaleDeployOrContainerName, Namespace: headScaleNamespace}, tailScaleDeploy)
	if err != nil {
		klog.Errorf("get tailscale deploy failed: %v", err)
		return ctrl.Result{}, err
	}
	tailScaleRouteEnv := ""
	for _, container := range tailScaleDeploy.Spec.Template.Spec.Containers {
		if container.Name != tailScaleDeployOrContainerName {
			continue
		}
		for _, env := range container.Env {
			if env.Name == subnetRoutesEnv {
				tailScaleRouteEnv = env.Value
			}
		}
	}

	oldTailScaleRoutes := strings.Split(tailScaleRouteEnv, ",")
	klog.Infof("oldTailScaleRoutes: %v", oldTailScaleRoutes)
	klog.Infof("new sub Routes: %v", subRoutes)

	// NOTE(review): isTsRoutesEqual sorts its arguments in place, so the
	// Join below may emit subRoutes in sorted rather than insertion order —
	// confirm whether TS_ROUTES ordering matters.
	if !isTsRoutesEqual(oldTailScaleRoutes, subRoutes) {
		newTailScaleRoutesEnv := strings.Join(subRoutes, ",")
		containers := tailScaleDeploy.Spec.Template.Spec.Containers
		for i := range containers {
			if containers[i].Name != tailScaleDeployOrContainerName {
				continue
			}
			for j := range containers[i].Env {
				if containers[i].Env[j].Name == subnetRoutesEnv {
					containers[i].Env[j].Value = newTailScaleRoutesEnv
				}
			}
		}
		err = r.Update(ctx, tailScaleDeploy)
		if err != nil {
			klog.Errorf("update tailscale deploy failed %v", err)
			return ctrl.Result{}, err
		}
	}

	configMap := &corev1.ConfigMap{}
	err = r.Get(ctx, types.NamespacedName{Name: tailScaleACLConfig, Namespace: headScaleNamespace}, configMap)
	if err != nil {
		return ctrl.Result{}, err
	}

	// If no ACLs need to be applied and the ConfigMap tailscale-acl has not been updated by the Tailscale ACL controller,
	// there is no need to update.
	// NOTE(review): the nil-check is redundant — a lookup on a nil map
	// already yields "" — but it is kept verbatim here.
	if len(acls) == 0 && (configMap.Annotations == nil || (configMap.Annotations != nil && configMap.Annotations[tailScaleACLPolicyMd5Key] == "")) {
		return ctrl.Result{}, nil
	}

	aclPolicyByte, err := makeACLPolicy(acls)
	if err != nil {
		return ctrl.Result{}, err
	}
	klog.Infof("aclPolicyByte:string: %s", string(aclPolicyByte))
	oldTailScaleACLPolicyMd5Sum := ""
	if configMap.Annotations != nil {
		oldTailScaleACLPolicyMd5Sum = configMap.Annotations[tailScaleACLPolicyMd5Key]
	}
	curTailScaleACLPolicyMd5Sum := utils.Md5String(string(aclPolicyByte))

	// Rewrite the ConfigMap only when the rendered policy actually changed.
	if curTailScaleACLPolicyMd5Sum != oldTailScaleACLPolicyMd5Sum {
		if configMap.Annotations == nil {
			configMap.Annotations = make(map[string]string)
		}
		if configMap.Data == nil {
			configMap.Data = make(map[string]string)
		}

		configMap.Annotations[tailScaleACLPolicyMd5Key] = curTailScaleACLPolicyMd5Sum
		configMap.Data["acl.json"] = string(aclPolicyByte)
		err = r.Update(ctx, configMap)
		if err != nil {
			return ctrl.Result{}, err
		}
	}

	// Stamp the new md5 on headscale's pod template so it rolls and reloads
	// the updated policy.
	deploy := &appsv1.Deployment{}
	err = r.Get(ctx, types.NamespacedName{Namespace: headScaleNamespace, Name: "headscale"}, deploy)
	if err != nil {
		return ctrl.Result{}, err
	}
	headScaleACLMd5 := ""
	if deploy.Spec.Template.Annotations != nil {
		klog.Infof("headscaleaclmd5..: %s", deploy.Spec.Template.Annotations[tailScaleACLPolicyMd5Key])
		headScaleACLMd5 = deploy.Spec.Template.Annotations[tailScaleACLPolicyMd5Key]
	}
	klog.Infof("oldheadscaleACLmd5: %v, newmd5: %v", headScaleACLMd5, curTailScaleACLPolicyMd5Sum)
	if headScaleACLMd5 != curTailScaleACLPolicyMd5Sum {
		if deploy.Spec.Template.Annotations == nil {
			deploy.Spec.Template.Annotations = make(map[string]string)
		}

		// update headscale deploy template annotations for rolling update
		deploy.Spec.Template.Annotations[tailScaleACLPolicyMd5Key] = curTailScaleACLPolicyMd5Sum
		err = r.Update(ctx, deploy)
		if err != nil {
			klog.Errorf("update headscale deploy failed: %v", err)
			return ctrl.Result{}, err
		}
		klog.Infof("rolling update headscale...")
	}

	return ctrl.Result{}, nil
}
|
||||
|
||||
func makeACLPolicy(acls []v1alpha1.ACL) ([]byte, error) {
|
||||
acls = append(acls, defaultACLs...)
|
||||
for i := range acls {
|
||||
acls[i].Action = "accept"
|
||||
acls[i].Src = []string{"*"}
|
||||
}
|
||||
aclPolicy := ACLPolicy{
|
||||
ACLs: acls,
|
||||
AutoApprovers: AutoApprovers{
|
||||
Routes: map[string][]string{
|
||||
"10.0.0.0/8": {"default"},
|
||||
"172.16.0.0/12": {"default"},
|
||||
"192.168.0.0/16": {"default"},
|
||||
},
|
||||
ExitNode: []string{},
|
||||
},
|
||||
}
|
||||
aclPolicyByte, err := json.Marshal(aclPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aclPolicyByte, nil
|
||||
}
|
||||
|
||||
// isTsRoutesEqual reports whether a and b contain the same route strings,
// ignoring order. Unlike the original implementation it does NOT sort the
// caller's slices in place (that side effect reordered subRoutes before
// they were joined into the TS_ROUTES env value).
func isTsRoutesEqual(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	// Sort private copies so the inputs are left untouched.
	ac := append([]string(nil), a...)
	bc := append([]string(nil), b...)
	sort.Strings(ac)
	sort.Strings(bc)
	for i := range ac {
		if ac[i] != bc[i] {
			return false
		}
	}
	return true
}
|
||||
22
framework/app-service/controllers/types.go
Normal file
22
framework/app-service/controllers/types.go
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
package controllers
|
||||
|
||||
const (
	// applicationSettingsPolicyKey is the settings key under which an
	// application's access-policy JSON is stored.
	applicationSettingsPolicyKey = "policy"
	// namespaceFinalizer and userFinalizer gate deletion of namespaces and
	// users until their controllers finish cleanup.
	namespaceFinalizer = "finalizers.bytetrade.io/namespaces"
	userFinalizer      = "finalizers.bytetrade.io/users"
	// creator is the label/annotation key recording who created a resource.
	creator = "bytetrade.io/creator"
)

// applicationSettingsSubPolicy describes the access policy applied to a
// single URI within an application.
type applicationSettingsSubPolicy struct {
	URI      string `json:"uri"`
	Policy   string `json:"policy"`
	OneTime  bool   `json:"one_time"`
	Duration int32  `json:"valid_duration"`
}

// applicationSettingsPolicy is an application's top-level access policy:
// a default policy plus optional per-URI sub-policies.
type applicationSettingsPolicy struct {
	DefaultPolicy string                          `json:"default_policy"`
	SubPolicies   []*applicationSettingsSubPolicy `json:"sub_policies"`
	OneTime       bool                            `json:"one_time"`
	Duration      int32                           `json:"valid_duration"`
}
|
||||
765
framework/app-service/controllers/user_controller.go
Normal file
765
framework/app-service/controllers/user_controller.go
Normal file
|
|
@ -0,0 +1,765 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/users"
|
||||
"bytetrade.io/web3os/app-service/pkg/users/userspace/v1"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils/sliceutil"
|
||||
|
||||
iamv1alpha2 "github.com/beclab/api/iam/v1alpha2"
|
||||
"github.com/beclab/lldap-client/pkg/cache/memory"
|
||||
lclient "github.com/beclab/lldap-client/pkg/client"
|
||||
lconfig "github.com/beclab/lldap-client/pkg/config"
|
||||
lapierrors "github.com/beclab/lldap-client/pkg/errors"
|
||||
"github.com/beclab/lldap-client/pkg/generated"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
utilwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
applyCorev1 "k8s.io/client-go/applyconfigurations/core/v1"
|
||||
applyMetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/pointer"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
const (
	// needSyncToLLdapAna / syncedToLLdapAna are annotation keys used for the
	// User -> LLDAP synchronization handshake.
	needSyncToLLdapAna = "iam.kubesphere.io/sync-to-lldap"
	syncedToLLdapAna   = "iam.kubesphere.io/synced-to-lldap"
	// userIndexAna is the annotation key carrying the user's index.
	userIndexAna = "bytetrade.io/user-index"
	// interval and timeout bound the LLDAP polling loops in this controller.
	interval = time.Second
	timeout  = 15 * time.Second
)

// UserController reconciles a User object
type UserController struct {
	client.Client
	// KubeConfig is the rest config this controller was built with.
	KubeConfig *rest.Config
	// LLdapClient talks to LLDAP; it is lazily initialized in Reconcile
	// when nil.
	LLdapClient *lclient.Client
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *UserController) SetupWithManager(mgr ctrl.Manager) error {
|
||||
c, err := controller.New("user-controller", mgr, controller.Options{
|
||||
MaxConcurrentReconciles: 1,
|
||||
Reconciler: r,
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("user-controller setup failed %v", err)
|
||||
return fmt.Errorf("user-controller setup failed %w", err)
|
||||
}
|
||||
|
||||
err = c.Watch(source.Kind(
|
||||
mgr.GetCache(),
|
||||
&iamv1alpha2.User{},
|
||||
handler.TypedEnqueueRequestsFromMapFunc(
|
||||
func(ctx context.Context, user *iamv1alpha2.User) []reconcile.Request {
|
||||
return []reconcile.Request{{NamespacedName: types.NamespacedName{
|
||||
Name: user.GetName(),
|
||||
}}}
|
||||
}),
|
||||
predicate.TypedFuncs[*iamv1alpha2.User]{
|
||||
CreateFunc: func(e event.TypedCreateEvent[*iamv1alpha2.User]) bool {
|
||||
obj := e.Object
|
||||
if obj.Status.State == "Failed" {
|
||||
return false
|
||||
}
|
||||
klog.Infof("create enque name: %s, state: %s", obj.Name, obj.Status.State)
|
||||
return true
|
||||
},
|
||||
UpdateFunc: func(e event.TypedUpdateEvent[*iamv1alpha2.User]) bool {
|
||||
oldObj := e.ObjectOld
|
||||
newObj := e.ObjectNew
|
||||
oldObj.Spec.InitialPassword = newObj.Spec.InitialPassword
|
||||
|
||||
isDeletionUpdate := newObj.DeletionTimestamp != nil
|
||||
specChanged := !reflect.DeepEqual(oldObj.Spec, newObj.Spec)
|
||||
|
||||
shouldReconcile := isDeletionUpdate || specChanged
|
||||
return shouldReconcile
|
||||
//return true
|
||||
},
|
||||
DeleteFunc: func(e event.TypedDeleteEvent[*iamv1alpha2.User]) bool {
|
||||
return true
|
||||
},
|
||||
},
|
||||
))
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("user-controller add watch failed %v", err)
|
||||
return fmt.Errorf("add watch failed %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop
|
||||
func (r *UserController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
klog.Infof("start reconcile user %s", req.Name)
|
||||
|
||||
// Fetch the User instance
|
||||
user := &iamv1alpha2.User{}
|
||||
err := r.Get(ctx, req.NamespacedName, user)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
// User was deleted, handle cleanup if needed
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if user.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if !sliceutil.HasString(user.Finalizers, userFinalizer) {
|
||||
user.ObjectMeta.Finalizers = append(user.ObjectMeta.Finalizers, userFinalizer)
|
||||
if updateErr := r.Update(ctx, user); updateErr != nil {
|
||||
klog.Errorf("failed to update user %v", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The object is being deleted
|
||||
if sliceutil.HasString(user.ObjectMeta.Finalizers, userFinalizer) {
|
||||
if r.LLdapClient != nil {
|
||||
if err = r.waitForDeleteFromLLDAP(user.Name); err != nil {
|
||||
klog.Infof("wait for delete user from lldap failed %v", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
if err = r.deleteRoleBindings(ctx, user); err != nil {
|
||||
klog.V(0).Infof("delete rolebinding failed %v", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
err = r.handleUserDeletion(ctx, user)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to delete user resource %v", err)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
user.Finalizers = sliceutil.RemoveString(user.ObjectMeta.Finalizers, func(item string) bool {
|
||||
return item == userFinalizer
|
||||
})
|
||||
if updateErr := r.Update(ctx, user, &client.UpdateOptions{}); updateErr != nil {
|
||||
klog.Infof("update user failed %v", updateErr)
|
||||
return ctrl.Result{}, updateErr
|
||||
}
|
||||
utils.PublishUserEvent("Delete", user.Name, user.Annotations[users.AnnotationUserDeleter])
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
if r.LLdapClient == nil {
|
||||
lldapClient, err := r.getLLdapClient()
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
r.LLdapClient = lldapClient
|
||||
}
|
||||
|
||||
if r.LLdapClient != nil {
|
||||
if err = r.waitForSyncToLLDAP(user); err != nil {
|
||||
klog.V(0).Infof("wait for sync to lldap failed %v", err)
|
||||
return ctrl.Result{RequeueAfter: time.Second}, nil
|
||||
}
|
||||
klog.V(0).Infof("user %s sync to lldap successes", user.Name)
|
||||
}
|
||||
|
||||
if user.Status.State == "" || user.Status.State == "Creating" {
|
||||
ret, err := r.handleUserCreation(ctx, user)
|
||||
time.Sleep(time.Second)
|
||||
return ret, err
|
||||
}
|
||||
klog.Infof("finish reconcile user %s", req.Name)
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *UserController) deleteRoleBindings(ctx context.Context, user *iamv1alpha2.User) error {
|
||||
if len(user.Name) > validation.LabelValueMaxLength {
|
||||
// ignore invalid label value error
|
||||
return nil
|
||||
}
|
||||
|
||||
clusterRoleBinding := &rbacv1.ClusterRoleBinding{}
|
||||
err := r.Client.DeleteAllOf(ctx, clusterRoleBinding, client.MatchingLabels{iamv1alpha2.UserReferenceLabel: user.Name})
|
||||
if err != nil {
|
||||
klog.Errorf("failed to delete all of clusterrolebinding %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
roleBindingList := &rbacv1.RoleBindingList{}
|
||||
err = r.Client.List(ctx, roleBindingList, client.MatchingLabels{iamv1alpha2.UserReferenceLabel: user.Name})
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get rolebindinglist %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, roleBinding := range roleBindingList.Items {
|
||||
err = r.Client.Delete(ctx, &roleBinding)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to delete rolebinding %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleUserCreation drives the creation flow for a new User: it moves the
// status to Creating, checks cluster pod capacity, validates resource
// limits and available memory, creates the user's resources, and finally
// marks the user Created (publishing a creation event).
//
// NOTE(review): when a check fails, the user is marked Failed and only the
// status-update error is returned — a nil return means no requeue, so the
// Failed state is terminal even for transient errors. Confirm intended.
func (r *UserController) handleUserCreation(ctx context.Context, user *iamv1alpha2.User) (ctrl.Result, error) {
	klog.Infof("starting user creation for %s", user.Name)

	// Update status to Creating
	if user.Status.State != "Creating" {
		err := r.updateUserStatus(ctx, user, "Creating", "Starting user creation process")
		if err != nil {
			klog.Errorf("failed to update user status to Created %v", err)
			return ctrl.Result{}, err
		}
	}

	// Check cluster pod capacity
	klog.Infof("start check cluster pod capacity.....")
	isSatisfied, err := r.checkClusterPodCapacity(ctx)
	if err != nil {
		message := fmt.Sprintf("failed to check cluster capacity %v", err)
		klog.Error(message)
		updateErr := r.updateUserStatus(ctx, user, "Failed", message)
		if updateErr != nil {
			klog.Errorf("failed to update user status to Created %v", updateErr)
		}
		return ctrl.Result{}, updateErr
	}
	if !isSatisfied {
		// Not enough allocatable pods left in the cluster.
		updateErr := r.updateUserStatus(ctx, user, "Failed", "Insufficient pods can allocate in the cluster")
		if updateErr != nil {
			klog.Errorf("failed to update user status to Failed %v", updateErr)
		}
		return ctrl.Result{}, updateErr
	}

	// Validate resource limits
	klog.Infof("start to validate resource limits.....")

	err = r.validateResourceLimits(user)
	// invalid resource limit, no need to requeue
	if err != nil {
		klog.Errorf("failed to validate resource limits %v", err)
		updateErr := r.updateUserStatus(ctx, user, "Failed", err.Error())
		if updateErr != nil {
			klog.Errorf("failed to update user status: %v", updateErr)
		}
		return ctrl.Result{}, updateErr
	}

	klog.Infof("start to checkResource.....")

	// Verify the cluster has enough free memory to honor the user's quota.
	err = r.checkResource(user)
	if err != nil {
		klog.Errorf("failed to checkResource %v", err)
		updateErr := r.updateUserStatus(ctx, user, "Failed", err.Error())
		if updateErr != nil {
			klog.Errorf("failed to update user status to Failed %v", updateErr)
		}
		return ctrl.Result{}, updateErr
	}

	// Create user resources
	err = r.createUserResources(ctx, user)
	if err != nil {
		klog.Errorf("failed to create user resource %v", err)
		updateErr := r.updateUserStatus(ctx, user, "Failed", fmt.Sprintf("Failed to create user resources: %v", err))
		if updateErr != nil {
			klog.Errorf("failed to update user status: %v", updateErr)
		}
		return ctrl.Result{}, updateErr
	}
	klog.Infof("create user resource success: %s", user.Name)
	updateErr := r.updateUserStatus(ctx, user, "Created", "Created user success")
	if updateErr != nil {
		klog.Errorf("failed to update user status to Created %v", updateErr)
	} else {
		klog.Infof("publish user creation event.....")
		utils.PublishUserEvent("Create", user.Name, user.Annotations[users.AnnotationUserCreator])
	}
	return ctrl.Result{}, updateErr
}
|
||||
|
||||
func (r *UserController) checkResource(user *iamv1alpha2.User) error {
|
||||
metrics, _, err := apputils.GetClusterResource("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
memoryLimit := user.Annotations[users.UserAnnotationLimitsMemoryKey]
|
||||
|
||||
memory, _ := resource.ParseQuantity(memoryLimit)
|
||||
if memory.CmpInt64(int64(metrics.Memory.Total-metrics.Memory.Usage)) >= 0 {
|
||||
return fmt.Errorf("unable to create user: Insufficient memory available in the cluster to meet the quota, required is: %.0f bytes, but available is: %.0f bytes", memory.AsApproximateFloat64(), metrics.Memory.Total-metrics.Memory.Usage)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleUserDeletion cleans up all resources belonging to a user being
// deleted and blocks until the user's namespaces (user-space-* and
// user-system-*) are gone, polling every 2s for up to 5 minutes.
func (r *UserController) handleUserDeletion(ctx context.Context, user *iamv1alpha2.User) error {
	klog.Infof("starting user deletion for %s", user.Name)

	// Update status to Deleting if not already
	if user.Status.State != "Deleting" {
		updateErr := r.updateUserStatus(ctx, user, "Deleting", "Starting user deletion process")
		if updateErr != nil {
			klog.Errorf("failed to update user %v", updateErr)
			return updateErr
		}
	}

	// Clean up user resources
	err := r.cleanupUserResources(ctx, user)
	if err != nil {
		klog.Errorf("failed to cleanup user resources: %v", err)
		return err
	}
	// wait for user-space, user-system namespace to be deleted
	userspaceNs := fmt.Sprintf("user-space-%s", user.Name)
	userSystemNs := fmt.Sprintf("user-system-%s", user.Name)
	userspaceExist, userSystemExist := true, true
	// NOTE(review): non-NotFound Get errors inside the poll are ignored and
	// simply cause another iteration until the 5-minute timeout — confirm
	// that is acceptable.
	err = utilwait.PollImmediate(2*time.Second, 5*time.Minute, func() (done bool, err error) {
		var ns corev1.Namespace
		err = r.Get(ctx, types.NamespacedName{Name: userspaceNs}, &ns)
		if apierrors.IsNotFound(err) {
			userspaceExist = false
		}
		err = r.Get(ctx, types.NamespacedName{Name: userSystemNs}, &ns)
		if apierrors.IsNotFound(err) {
			userSystemExist = false
		}
		if !userspaceExist && !userSystemExist {
			return true, nil
		}
		return false, nil

	})
	if err != nil {
		klog.Errorf("wait for user namespace to deleted failed %v", err)
		return err
	}
	return nil
}
|
||||
|
||||
// validateResourceLimits validates the resource-limit settings carried by
// the user object, delegating to users.ValidateResourceLimits.
func (r *UserController) validateResourceLimits(user *iamv1alpha2.User) error {
	return users.ValidateResourceLimits(user)
}
|
||||
|
||||
func (r *UserController) waitForDeleteFromLLDAP(username string) error {
|
||||
err := utilwait.PollImmediate(interval, timeout, func() (done bool, err error) {
|
||||
err = r.LLdapClient.Users().Delete(context.TODO(), username)
|
||||
if err != nil && lapierrors.IsNotFound(err) {
|
||||
klog.Error(err)
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *UserController) createUserResources(ctx context.Context, user *iamv1alpha2.User) error {
|
||||
// Create user using userspace manager
|
||||
klog.Infof("creating user resources for %s", user.Name)
|
||||
|
||||
// create globalrolebinding
|
||||
globalRoleBinding := iamv1alpha2.GlobalRoleBinding{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: iamv1alpha2.SchemeGroupVersion.String(),
|
||||
Kind: iamv1alpha2.ResourceKindGlobalRoleBinding,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: user.Name,
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: iamv1alpha2.SchemeGroupVersion.String(),
|
||||
Kind: iamv1alpha2.ResourceKindGlobalRole,
|
||||
Name: getGlobalRole(user.Annotations[users.UserAnnotationOwnerRole]),
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
APIGroup: iamv1alpha2.SchemeGroupVersion.String(),
|
||||
Kind: iamv1alpha2.ResourceKindUser,
|
||||
Name: user.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
err := r.Create(ctx, &globalRoleBinding)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
klog.Errorf("failed to create gloabalrolebinding %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = r.createNamespace(ctx, user)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to create namespace %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
ksClient, err := kubernetes.NewForConfig(r.KubeConfig)
|
||||
if err != nil {
|
||||
klog.Errorf("make ksClient failed %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// copy ssl configmap to new userspace
|
||||
var applyCm *applyCorev1.ConfigMapApplyConfiguration
|
||||
creatorUser, err := utils.FindOwnerUser(r.Client, user)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to find user with owner role %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
ownerUserspace := fmt.Sprintf("user-space-%s", creatorUser.Name)
|
||||
nsName := fmt.Sprintf("user-space-%s", user.Name)
|
||||
sslConfig, err := ksClient.CoreV1().ConfigMaps(ownerUserspace).Get(ctx, "zone-ssl-config", metav1.GetOptions{})
|
||||
if err == nil && sslConfig != nil {
|
||||
sslConfig.Data["ephemeral"] = "true"
|
||||
|
||||
applyCm = NewApplyConfigmap(nsName, sslConfig.Data)
|
||||
_, err = ksClient.CoreV1().ConfigMaps(nsName).Apply(ctx, applyCm, metav1.ApplyOptions{
|
||||
FieldManager: "application/apply-patch"})
|
||||
if err != nil {
|
||||
klog.Errorf("failed to apply configmap %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = r.createUserApps(ctx, user)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to create user apps %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *UserController) createNamespace(ctx context.Context, user *iamv1alpha2.User) error {
|
||||
|
||||
// create namespace user-space-<user>
|
||||
userspaceNs := fmt.Sprintf("user-space-%s", user.Name)
|
||||
userSystemNs := fmt.Sprintf("user-system-%s", user.Name)
|
||||
creatorUser, err := utils.FindOwnerUser(r.Client, user)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// create user-space namespace
|
||||
ns := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: userspaceNs,
|
||||
Annotations: map[string]string{
|
||||
creator: creatorUser.Name,
|
||||
},
|
||||
Finalizers: []string{
|
||||
namespaceFinalizer,
|
||||
},
|
||||
},
|
||||
}
|
||||
err = r.Create(ctx, &ns)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
klog.Errorf("failed to create user-space namespace %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// create user-system namespace
|
||||
userSystemNamespace := corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: userSystemNs,
|
||||
Annotations: map[string]string{
|
||||
"kubesphere.io/creator": "",
|
||||
},
|
||||
Finalizers: []string{
|
||||
namespaceFinalizer,
|
||||
},
|
||||
},
|
||||
}
|
||||
err = r.Create(ctx, &userSystemNamespace)
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
klog.Errorf("failed to create user-system namespace %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *UserController) createUserApps(ctx context.Context, user *iamv1alpha2.User) error {
|
||||
creator := userspace.NewCreator(r.Client, r.KubeConfig, user.Name)
|
||||
_, _, err := creator.CreateUserApps(ctx)
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("failed to create user apps %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *UserController) cleanupUserResources(ctx context.Context, user *iamv1alpha2.User) error {
|
||||
deleter := userspace.NewDeleter(r.Client, r.KubeConfig, user.Name)
|
||||
err := deleter.DeleteUserResource(ctx)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to delete user %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkClusterPodCapacity reports whether the cluster still has pod
// capacity available, delegating to users.CheckClusterPodCapacity.
func (r *UserController) checkClusterPodCapacity(ctx context.Context) (bool, error) {
	return users.CheckClusterPodCapacity(ctx, r.Client)
}
|
||||
|
||||
func (r *UserController) updateUserStatus(ctx context.Context, user *iamv1alpha2.User, state, reason string) error {
|
||||
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
// Get the latest version of the user
|
||||
latestUser := &iamv1alpha2.User{}
|
||||
err := r.Get(ctx, types.NamespacedName{Name: user.Name}, latestUser)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
latestUser.Status.State = iamv1alpha2.UserState(state)
|
||||
latestUser.Status.Reason = reason
|
||||
|
||||
return r.Update(ctx, latestUser)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (r *UserController) getCredentialVal(ctx context.Context, key string) (string, error) {
|
||||
var secret corev1.Secret
|
||||
k := types.NamespacedName{Name: "lldap-credentials", Namespace: "os-platform"}
|
||||
err := r.Client.Get(ctx, k, &secret)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if value, ok := secret.Data[key]; ok {
|
||||
return string(value), nil
|
||||
}
|
||||
return "", fmt.Errorf("can not find credentialval for key %s", key)
|
||||
|
||||
}
|
||||
|
||||
func (r *UserController) getLLdapClient() (*lclient.Client, error) {
|
||||
bindUsername, err := r.getCredentialVal(context.TODO(), "lldap-ldap-user-dn")
|
||||
if err != nil {
|
||||
klog.Infof("get lldap secret failed %v", err)
|
||||
return nil, err
|
||||
}
|
||||
bindPassword, err := r.getCredentialVal(context.TODO(), "lldap-ldap-user-pass")
|
||||
if err != nil {
|
||||
klog.Infof("get lldap secret failed %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lldapClient, err := lclient.New(&lconfig.Config{
|
||||
Host: "http://lldap-service.os-platform:17170",
|
||||
Username: bindUsername,
|
||||
Password: bindPassword,
|
||||
TokenCache: memory.New(),
|
||||
})
|
||||
if err != nil {
|
||||
klog.Infof("get lldap client failed %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return lldapClient, nil
|
||||
}
|
||||
|
||||
// waitForSyncToLLDAP reconciles the user into LLDAP. If the user does not
// exist there yet, it is created (with its groups, creating any missing
// group on the fly); if it does exist, the group memberships are diffed
// and adjusted. On success the user object's annotations are updated to
// record the sync and the user index, and the initial password is cleared.
// The whole operation is retried via PollImmediate until it succeeds or
// the package-level timeout elapses.
func (r *UserController) waitForSyncToLLDAP(user *iamv1alpha2.User) error {
	ana := user.Annotations
	if ana == nil {
		return nil
	}
	// Only users explicitly marked for LLDAP sync are processed.
	isNeedSyncToLLDap, _ := strconv.ParseBool(ana[needSyncToLLdapAna])
	//synced, _ := strconv.ParseBool(ana[syncedToLLdapAna])
	if !isNeedSyncToLLDap {
		return nil
	}
	// Captured by the poll closure; written on whichever branch runs,
	// then persisted into the user annotations below.
	var userIndex int

	err := utilwait.PollImmediate(interval, timeout, func() (done bool, err error) {
		klog.Infof("poll info from lldap...")
		_, err = r.LLdapClient.Users().Get(context.TODO(), user.Name)

		if err != nil {
			// user not synced to lldap
			if lapierrors.IsNotFound(err) {
				u := generated.CreateUserInput{
					Id:          user.Name,
					Email:       user.Spec.Email,
					DisplayName: user.Name,
				}
				userRes, err := r.LLdapClient.Users().Create(context.TODO(), &u, user.Spec.InitialPassword)
				if err != nil && !lapierrors.IsAlreadyExists(err) {
					return false, err
				}
				// user created success in lldap

				// NOTE(review): if Create returned IsAlreadyExists, userRes
				// may be nil here and this dereference would panic — confirm
				// the client's behavior on AlreadyExists.
				userIndex = userRes.CreateUser.UserIndex

				// Ensure each declared group exists and add the user to it.
				for _, groupName := range user.Spec.Groups {
					var gid int
					g, err := r.LLdapClient.Groups().GetByName(context.TODO(), groupName)
					if err != nil && !lapierrors.IsNotFound(err) {
						return false, err
					}

					if err == nil {
						// group already exist in lldap
						gid = g.Id
					}

					// group does not exist in lldap, so create it
					if lapierrors.IsNotFound(err) {
						newGroup, err := r.LLdapClient.Groups().Create(context.TODO(), groupName, "")
						if err != nil && !lapierrors.IsAlreadyExists(err) {
							return false, err
						}

						if err == nil {
							gid = newGroup.Id
						}
					}
					// gid == 0 means neither lookup nor create produced a
					// usable group id (e.g. create raced and lost).
					if gid == 0 {
						return false, errors.New("invalid group id")
					}
					err = r.LLdapClient.Groups().AddUser(context.TODO(), user.Name, gid)
					if err != nil && !lapierrors.IsAlreadyExists(err) {
						return false, err
					}
				}

			} else {
				return false, err
			}
		} else {
			// user already exists in lldap, should add/remove group
			u, err := r.LLdapClient.Users().Get(context.TODO(), user.Name)
			if err != nil {
				return false, err
			}
			userIndex = u.UserIndex

			// Collect the group names LLDAP currently has for the user.
			getGroups := func(u *generated.GetUserDetailsUser) (groups []string) {
				for _, group := range u.Groups {
					groups = append(groups, group.DisplayName)
				}
				return groups
			}
			// Diff desired (spec) vs actual (LLDAP) memberships.
			oldGroups := sets.NewString(getGroups(u)...)
			curGroups := sets.NewString(user.Spec.Groups...)
			groupToDelete := oldGroups.Difference(curGroups)
			groupToAdd := curGroups.Difference(oldGroups)

			for groupName := range groupToDelete {
				group, err := r.LLdapClient.Groups().GetByName(context.TODO(), groupName)
				if err != nil {
					return false, err
				}
				err = r.LLdapClient.Groups().RemoveUser(context.TODO(), user.Name, group.Id)
				if err != nil {
					return false, err
				}
			}
			for groupName := range groupToAdd {
				groupId := 0
				group, err := r.LLdapClient.Groups().GetByName(context.TODO(), groupName)
				if err != nil {
					if !lapierrors.IsNotFound(err) {
						return false, err
					}
					groupNew, err := r.LLdapClient.Groups().Create(context.TODO(), groupName, "")
					if err != nil && !lapierrors.IsAlreadyExists(err) {
						return false, err
					}
					// NOTE(review): unlike the creation branch above, an
					// IsAlreadyExists error here still dereferences groupNew,
					// which may be nil — confirm and guard if needed.
					groupId = groupNew.Id
				} else {
					groupId = group.Id
				}
				err = r.LLdapClient.Groups().AddUser(context.TODO(), user.Name, groupId)
				if err != nil && !lapierrors.IsAlreadyExists(err) {
					return false, err
				}
			}
		}
		// Persist the sync result on the User object (retrying conflicts):
		// mark as synced, record the index, and drop the initial password.
		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			var u iamv1alpha2.User
			err = r.Get(context.TODO(), types.NamespacedName{Name: user.Name}, &u)
			if err != nil {
				return err
			}
			u.Annotations[syncedToLLdapAna] = "true"
			// NOTE(review): the -2 offset presumably skips built-in LLDAP
			// accounts so indexes start at 0 — TODO confirm.
			u.Annotations[userIndexAna] = strconv.FormatInt(int64(userIndex-2), 10)
			u.Spec.InitialPassword = ""
			err = r.Update(context.TODO(), &u, &client.UpdateOptions{})
			if err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return false, err
		}

		return true, nil
	})
	klog.V(0).Infof("poll result %v", err)
	return err
}
|
||||
|
||||
// UserCreateOption represents the options for creating a user.
type UserCreateOption struct {
	Name         string // user / resource name
	OwnerRole    string // owner role of the user — presumably one of "owner"/"admin"/"normal" as mapped by getGlobalRole; TODO confirm
	DisplayName  string // human-readable display name
	Email        string // user email address
	Password     string // initial password
	Description  string // free-form description
	TerminusName string // terminus name of the user
	MemoryLimit  string // memory quota — presumably a Kubernetes quantity string (e.g. "4Gi"); TODO confirm
	CpuLimit     string // CPU quota — presumably a Kubernetes quantity string; TODO confirm
}
|
||||
|
||||
func NewApplyConfigmap(namespace string, data map[string]string) *applyCorev1.ConfigMapApplyConfiguration {
|
||||
return &applyCorev1.ConfigMapApplyConfiguration{
|
||||
TypeMetaApplyConfiguration: applyMetav1.TypeMetaApplyConfiguration{
|
||||
Kind: pointer.String("ConfigMap"),
|
||||
APIVersion: pointer.String(corev1.SchemeGroupVersion.String()),
|
||||
},
|
||||
ObjectMetaApplyConfiguration: &applyMetav1.ObjectMetaApplyConfiguration{
|
||||
Name: pointer.String("zone-ssl-config"),
|
||||
Namespace: pointer.String(namespace),
|
||||
},
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
// getGlobalRole maps a user's owner-role annotation value to the
// corresponding global role name; unknown roles map to the empty string.
func getGlobalRole(role string) string {
	switch role {
	case "owner", "admin":
		return "platform-admin"
	case "normal":
		return "workspaces-manager"
	default:
		return ""
	}
}
|
||||
118
framework/app-service/controllers/userenv_controller.go
Normal file
118
framework/app-service/controllers/userenv_controller.go
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/security"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// UserEnvController reconciles UserEnv objects: when a user-level
// environment variable changes, it marks every AppEnv of the same user
// that references the variable for re-sync.
type UserEnvController struct {
	client.Client
	Scheme *runtime.Scheme
}
|
||||
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=userenvs,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=userenvs/status,verbs=get;update;patch
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=appenvs,verbs=get;list;watch;update;patch
|
||||
|
||||
func (r *UserEnvController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
var userEnv sysv1alpha1.UserEnv
|
||||
if err := r.Get(ctx, req.NamespacedName, &userEnv); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
return r.reconcileUserEnv(ctx, &userEnv)
|
||||
}
|
||||
|
||||
func (r *UserEnvController) reconcileUserEnv(ctx context.Context, userEnv *sysv1alpha1.UserEnv) (ctrl.Result, error) {
|
||||
// Extract username from UserEnv namespace
|
||||
isUserNs, username := security.IsUserInternalNamespaces(userEnv.Namespace)
|
||||
if !isUserNs {
|
||||
klog.Warningf("UserEnv %s/%s is not in a user namespace, skipping reconciliation", userEnv.Namespace, userEnv.Name)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
klog.Infof("Processing UserEnv change: %s of user: %s", userEnv.EnvName, username)
|
||||
|
||||
var appEnvList sysv1alpha1.AppEnvList
|
||||
if err := r.List(ctx, &appEnvList); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to list AppEnvs: %v", err)
|
||||
}
|
||||
|
||||
refCount := 0
|
||||
annotatedCount := 0
|
||||
failedCount := 0
|
||||
|
||||
for i := range appEnvList.Items {
|
||||
appEnv := &appEnvList.Items[i]
|
||||
if appEnv.AppOwner != username {
|
||||
continue
|
||||
}
|
||||
if r.isReferenced(appEnv, userEnv.EnvName) {
|
||||
refCount++
|
||||
|
||||
annotated, err := r.annotateAppEnvForSync(ctx, appEnv, userEnv)
|
||||
if annotated {
|
||||
annotatedCount++
|
||||
}
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to annotate AppEnv %s/%s for sync: %v", appEnv.Namespace, appEnv.Name, err)
|
||||
failedCount++
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if refCount > 0 {
|
||||
klog.Infof("UserEnv %s reconciliation completed: %d total references, %d annotated for sync, %d failed",
|
||||
userEnv.EnvName, refCount, annotatedCount, failedCount)
|
||||
}
|
||||
|
||||
if failedCount > 0 {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to annotate %d AppEnvs referencing environment variable %s", failedCount, userEnv.EnvName)
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *UserEnvController) isReferenced(appEnv *sysv1alpha1.AppEnv, userEnvName string) bool {
|
||||
for _, envVar := range appEnv.Envs {
|
||||
if envVar.ValueFrom != nil && envVar.ValueFrom.EnvName == userEnvName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *UserEnvController) annotateAppEnvForSync(ctx context.Context, appEnv *sysv1alpha1.AppEnv, userEnv *sysv1alpha1.UserEnv) (bool, error) {
|
||||
// Check if annotation already exists
|
||||
if appEnv.Annotations != nil && appEnv.Annotations[constants.AppEnvSyncAnnotation] != "" {
|
||||
klog.V(4).Infof("AppEnv %s/%s already has sync annotation, skipping", appEnv.Namespace, appEnv.Name)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Add annotation to trigger AppEnvController sync
|
||||
original := appEnv.DeepCopy()
|
||||
if appEnv.Annotations == nil {
|
||||
appEnv.Annotations = make(map[string]string)
|
||||
}
|
||||
appEnv.Annotations[constants.AppEnvSyncAnnotation] = userEnv.EnvName
|
||||
|
||||
klog.Infof("Annotating AppEnv %s/%s for sync due to environment variable %s change",
|
||||
appEnv.Namespace, appEnv.Name, userEnv.EnvName)
|
||||
|
||||
return true, r.Patch(ctx, appEnv, client.MergeFrom(original))
|
||||
}
|
||||
|
||||
// SetupWithManager registers the controller with the manager, watching
// UserEnv objects.
func (r *UserEnvController) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&sysv1alpha1.UserEnv{}).
		Complete(r)
}
|
||||
192
framework/app-service/controllers/userenv_sync_controller.go
Normal file
192
framework/app-service/controllers/userenv_sync_controller.go
Normal file
|
|
@ -0,0 +1,192 @@
|
|||
package controllers
|
||||
|
||||
import (
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"gopkg.in/yaml.v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
iamv1alpha2 "github.com/beclab/api/iam/v1alpha2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
// Location of the cluster-wide baseline user-env definition that is
// synced into every user's namespace.
const (
	userEnvConfigMapNamespace = "os-framework"
	userEnvConfigMapName      = "user-env"
	userEnvConfigMapKey       = "user-env.yaml"
)

// userEnvFile mirrors the YAML document stored under userEnvConfigMapKey.
type userEnvFile struct {
	APIVersion string                   `yaml:"apiVersion"`
	UserEnvs   []sysv1alpha1.EnvVarSpec `yaml:"userEnvs"`
}

// UserEnvSyncController creates the baseline UserEnv objects (from the
// shared ConfigMap) in each user's namespace.
type UserEnvSyncController struct {
	client.Client
	Scheme *runtime.Scheme
}
|
||||
|
||||
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch
|
||||
//+kubebuilder:rbac:groups=iam.kubesphere.io,resources=users,verbs=get;list;watch
|
||||
//+kubebuilder:rbac:groups=sys.bytetrade.io,resources=userenvs,verbs=get;list;watch;create
|
||||
|
||||
// SetupWithManager wires the controller to two event sources:
//   - the single baseline ConfigMap (filtered by cmPred), whose change
//     triggers a sync for all users, and
//   - User objects whose status is "Created", each mapped to a
//     namespace-less request carrying only the user name (Reconcile
//     relies on the empty namespace to tell the two apart).
func (r *UserEnvSyncController) SetupWithManager(mgr ctrl.Manager) error {
	// Only react to the one ConfigMap that holds the baseline user envs.
	cmPred := predicate.NewPredicateFuncs(func(obj client.Object) bool {
		return obj.GetNamespace() == userEnvConfigMapNamespace && obj.GetName() == userEnvConfigMapName
	})

	// Only users that are fully provisioned are worth syncing.
	userPred := predicate.NewPredicateFuncs(func(obj client.Object) bool {
		user, ok := obj.(*iamv1alpha2.User)
		if !ok {
			return false
		}
		return string(user.Status.State) == "Created"
	})

	return builder.ControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}, builder.WithPredicates(cmPred)).
		Watches(&iamv1alpha2.User{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
			user, ok := obj.(*iamv1alpha2.User)
			if !ok {
				return nil
			}
			// Namespace deliberately left empty; see Reconcile.
			return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: user.Name}}}
		}), builder.WithPredicates(userPred)).
		Complete(r)
}
|
||||
|
||||
func (r *UserEnvSyncController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
// the changes on the configmap triggers a sync operation for all users
|
||||
if req.Namespace == userEnvConfigMapNamespace && req.Name == userEnvConfigMapName {
|
||||
return r.reconcileAllUsers(ctx)
|
||||
}
|
||||
|
||||
// the changes on a single user resource triggers a sync operation only for this particular user
|
||||
if req.Namespace == "" && req.Name != "" {
|
||||
return r.reconcileSingleUser(ctx, req.Name)
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *UserEnvSyncController) reconcileAllUsers(ctx context.Context) (ctrl.Result, error) {
|
||||
klog.Infof("UserEnvSync: detected %s/%s change, syncing all users", userEnvConfigMapNamespace, userEnvConfigMapName)
|
||||
|
||||
base, err := r.loadBaseUserEnvFromConfigMap(ctx)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if base == nil {
|
||||
klog.Warningf("UserEnvSync: base user env config not found; skipping")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
var users iamv1alpha2.UserList
|
||||
if err := r.List(ctx, &users); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("list users failed: %w", err)
|
||||
}
|
||||
|
||||
failed := 0
|
||||
for i := range users.Items {
|
||||
user := &users.Items[i]
|
||||
if string(user.Status.State) != "Created" {
|
||||
continue
|
||||
}
|
||||
if _, err := r.syncUserEnvForUser(ctx, user.Name, base.UserEnvs); err != nil {
|
||||
klog.Errorf("UserEnvSync: failed to sync for user %s: %v", user.Name, err)
|
||||
failed++
|
||||
}
|
||||
}
|
||||
|
||||
if failed > 0 {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to sync userenv for %d users", failed)
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *UserEnvSyncController) reconcileSingleUser(ctx context.Context, username string) (ctrl.Result, error) {
|
||||
klog.Infof("UserEnvSync: user change detected for %s, syncing user envs", username)
|
||||
|
||||
u := &iamv1alpha2.User{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: username}, u); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
if string(u.Status.State) != "Created" {
|
||||
klog.V(4).Infof("UserEnvSync: skipping user %s with state %s", username, u.Status.State)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
base, err := r.loadBaseUserEnvFromConfigMap(ctx)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if base == nil {
|
||||
klog.Warningf("UserEnvSync: base user env config not found; skipping for user %s", username)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
_, err = r.syncUserEnvForUser(ctx, username, base.UserEnvs)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
func (r *UserEnvSyncController) loadBaseUserEnvFromConfigMap(ctx context.Context) (*userEnvFile, error) {
|
||||
cm := &corev1.ConfigMap{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: userEnvConfigMapNamespace, Name: userEnvConfigMapName}, cm); err != nil {
|
||||
return nil, client.IgnoreNotFound(err)
|
||||
}
|
||||
content := cm.Data[userEnvConfigMapKey]
|
||||
if content == "" {
|
||||
return &userEnvFile{}, nil
|
||||
}
|
||||
var cfg userEnvFile
|
||||
if err := yaml.Unmarshal([]byte(content), &cfg); err != nil {
|
||||
return nil, fmt.Errorf("parse base user env config from cm failed: %w", err)
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func (r *UserEnvSyncController) syncUserEnvForUser(ctx context.Context, username string, base []sysv1alpha1.EnvVarSpec) (int, error) {
|
||||
userNs := apputils.UserspaceName(username)
|
||||
var existing sysv1alpha1.UserEnvList
|
||||
if err := r.List(ctx, &existing, client.InNamespace(userNs)); err != nil {
|
||||
return 0, fmt.Errorf("list userenvs in %s failed: %w", userNs, err)
|
||||
}
|
||||
|
||||
existSet := make(map[string]struct{}, len(existing.Items))
|
||||
for i := range existing.Items {
|
||||
existSet[existing.Items[i].EnvName] = struct{}{}
|
||||
}
|
||||
|
||||
created := 0
|
||||
for _, spec := range base {
|
||||
if _, ok := existSet[spec.EnvName]; ok {
|
||||
continue
|
||||
}
|
||||
name, err := apputils.EnvNameToResourceName(spec.EnvName)
|
||||
if err != nil {
|
||||
klog.Warningf("UserEnvSync: skip invalid env name %s for user %s: %v", spec.EnvName, username, err)
|
||||
continue
|
||||
}
|
||||
ue := &sysv1alpha1.UserEnv{}
|
||||
ue.Name = name
|
||||
ue.Namespace = userNs
|
||||
ue.EnvVarSpec = spec
|
||||
if err := r.Create(ctx, ue); err != nil {
|
||||
return created, fmt.Errorf("create userenv %s/%s failed: %w", userNs, name, err)
|
||||
}
|
||||
created++
|
||||
klog.Infof("UserEnvSync: created userenv %s/%s for user %s", userNs, name, username)
|
||||
}
|
||||
return created, nil
|
||||
}
|
||||
271
framework/app-service/go.mod
Normal file
271
framework/app-service/go.mod
Normal file
|
|
@ -0,0 +1,271 @@
|
|||
module bytetrade.io/web3os/app-service
|
||||
|
||||
go 1.24.2
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver/v3 v3.4.0
|
||||
github.com/apecloud/kubeblocks v1.0.0
|
||||
github.com/argoproj/argo-workflows/v3 v3.7.1
|
||||
github.com/beclab/api v0.0.2
|
||||
github.com/beclab/lldap-client v0.0.11
|
||||
github.com/containerd/containerd v1.7.28
|
||||
github.com/containers/image/v5 v5.36.1
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/emicklei/go-restful-openapi/v2 v2.11.0
|
||||
github.com/emicklei/go-restful/v3 v3.13.0
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4
|
||||
github.com/fsnotify/fsnotify v1.9.0
|
||||
github.com/go-crypt/crypt v0.4.5
|
||||
github.com/go-logr/logr v1.4.3
|
||||
github.com/go-openapi/spec v0.21.0
|
||||
github.com/go-resty/resty/v2 v2.16.5
|
||||
github.com/golang/protobuf v1.5.4
|
||||
github.com/google/go-github/v50 v50.2.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/go-getter v1.7.9
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/hashicorp/go-version v1.7.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/nats-io/nats.go v1.45.0
|
||||
github.com/onsi/ginkgo/v2 v2.25.2
|
||||
github.com/onsi/gomega v1.38.2
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.23.0
|
||||
github.com/prometheus/common v0.65.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/thoas/go-funk v0.9.3
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
|
||||
google.golang.org/protobuf v1.36.8
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools/v3 v3.5.2
|
||||
helm.sh/helm/v3 v3.18.6
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/cli-runtime v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/code-generator v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d
|
||||
sigs.k8s.io/controller-runtime v0.22.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go v0.121.2 // indirect
|
||||
cloud.google.com/go/auth v0.16.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/iam v1.5.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
cloud.google.com/go/storage v1.55.0 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.28.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.52.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 // indirect
|
||||
github.com/Khan/genqlient v0.7.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.13.0 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go v1.50.8 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.3 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
|
||||
github.com/containerd/cgroups/v3 v3.0.5 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
github.com/containerd/containerd/api v1.8.0 // indirect
|
||||
github.com/containerd/continuity v0.4.4 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/fifo v1.1.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/containerd/ttrpc v1.2.7 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.2.3 // indirect
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
|
||||
github.com/containers/ocicrypt v1.2.1 // indirect
|
||||
github.com/containers/storage v1.59.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v28.3.2+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-crypt/x v0.4.7 // indirect
|
||||
github.com/go-errors/errors v1.5.1 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.1 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.1 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-containerregistry v0.20.5 // indirect
|
||||
github.com/google/go-intervals v0.0.2 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-safetemp v1.0.0 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jinzhu/copier v0.4.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/sys/capability v0.4.0 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/nats-io/nkeys v0.4.11 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.2.1 // indirect
|
||||
github.com/opencontainers/selinux v1.12.0 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.9.0 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.14.0 // indirect
|
||||
github.com/spf13/cast v1.9.2 // indirect
|
||||
github.com/spf13/cobra v1.9.1 // indirect
|
||||
github.com/spf13/pflag v1.0.7 // indirect
|
||||
github.com/spf13/viper v1.20.1 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/sylabs/sif/v2 v2.21.1 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
|
||||
github.com/ulikunitz/xz v0.5.12 // indirect
|
||||
github.com/urfave/cli v1.22.16 // indirect
|
||||
github.com/vbatts/tar-split v0.12.1 // indirect
|
||||
github.com/vektah/gqlparser/v2 v2.5.20 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/automaxprocs v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.41.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/net v0.43.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
golang.org/x/term v0.34.0 // indirect
|
||||
golang.org/x/text v0.28.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/api v0.236.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/grpc v1.72.2 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.0 // indirect
|
||||
k8s.io/apiserver v0.34.0 // indirect
|
||||
k8s.io/component-base v0.34.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
|
||||
k8s.io/kubectl v0.33.3 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
||||
2256
framework/app-service/go.sum
Normal file
2256
framework/app-service/go.sum
Normal file
File diff suppressed because it is too large
Load diff
0
framework/app-service/hack/boilerplate.go.txt
Normal file
0
framework/app-service/hack/boilerplate.go.txt
Normal file
5
framework/app-service/hack/tools.go
Normal file
5
framework/app-service/hack/tools.go
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
// Package tools pins build-time tool dependencies so that `go mod tidy`
// keeps them in go.mod even though no application code imports them.
package tools

import (
	// Blank import keeps k8s.io/code-generator as a module dependency;
	// it is executed by hack/update-codegen.sh, not compiled into binaries.
	_ "k8s.io/code-generator"
)
|
||||
38
framework/app-service/hack/update-codegen.sh
Executable file
38
framework/app-service/hack/update-codegen.sh
Executable file
|
|
@ -0,0 +1,38 @@
|
|||
#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Repository root, resolved relative to this script's location.
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# Locate k8s.io/code-generator: prefer the vendored copy, fall back to a
# sibling checkout; overridable via the CODEGEN_PKG environment variable.
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}

source "${CODEGEN_PKG}/kube_codegen.sh"

THIS_PKG="bytetrade.io/web3os/app-service"

# Generate deepcopy and related helpers for the API types under ./api.
kube::codegen::gen_helpers \
    --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
    "${SCRIPT_ROOT}/api"

# Generate typed clientsets, listers and informers (with watch support)
# into pkg/generated.
kube::codegen::gen_client \
    --with-watch \
    --output-dir "${SCRIPT_ROOT}/pkg/generated" \
    --output-pkg "${THIS_PKG}/pkg/generated" \
    --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
    "${SCRIPT_ROOT}/api"
|
||||
|
||||
44
framework/app-service/hack/verify-codegen.sh
Executable file
44
framework/app-service/hack/verify-codegen.sh
Executable file
|
|
@ -0,0 +1,44 @@
|
|||
#!/usr/bin/env bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Verifies that the generated code under pkg/ is up to date: snapshots the
# current tree, re-runs codegen, diffs, and restores the snapshot so the
# working copy is never left mutated.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
DIFFROOT="${SCRIPT_ROOT}/pkg"
# Keep a handle on the mktemp parent so cleanup removes the whole temp tree,
# not just the pkg/ subdirectory (the original leaked the parent dir).
TMP_ROOT="$(mktemp -d -t "$(basename "$0").XXXXXX")"
TMP_DIFFROOT="${TMP_ROOT}/pkg"

cleanup() {
  rm -rf "${TMP_ROOT}"
}
trap "cleanup" EXIT SIGINT

mkdir -p "${TMP_DIFFROOT}"
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"

"${SCRIPT_ROOT}/hack/update-codegen.sh"
echo "diffing ${DIFFROOT} against freshly generated codegen"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
# Restore the pristine tree regardless of the diff result so that running
# the verifier does not modify the working copy.
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]; then
  echo "${DIFFROOT} up to date."
else
  echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh"
fi
exit $ret
|
||||
13
framework/app-service/pkg/apiserver/api/errors.go
Normal file
13
framework/app-service/pkg/apiserver/api/errors.go
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
package api

import "errors"

// Sentinel errors returned by the app-service API layer. Callers should
// compare against them with errors.Is.
var (
	// ErrResourceNotFound indicates that a resource is not found.
	ErrResourceNotFound = errors.New("resource not found")
	// ErrGPUNodeNotFound indicates that no node with an available GPU exists.
	ErrGPUNodeNotFound = errors.New("no available gpu node found")
	// ErrStartUpFailed indicates that an app failed during startup.
	ErrStartUpFailed = errors.New("app started up failed")
	// ErrLaunchFailed indicates that an app failed to launch.
	ErrLaunchFailed = errors.New("app launched failed")
	// ErrNotSupportOperation indicates the requested operation is not supported.
	ErrNotSupportOperation = errors.New("not support operation")
	// ErrApplicationManagerNotFound indicates the application-manager resource is missing.
	ErrApplicationManagerNotFound = errors.New("application-manager not found")
)
|
||||
276
framework/app-service/pkg/apiserver/api/types.go
Normal file
276
framework/app-service/pkg/apiserver/api/types.go
Normal file
|
|
@ -0,0 +1,276 @@
|
|||
package api

import (
	sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
	"bytetrade.io/web3os/app-service/pkg/constants"
	imagetypes "github.com/containers/image/v5/types"
)

// Annotation keys used to attach install/upgrade metadata to application
// resources.
const (
	// AppTokenKey is the annotation key for an app token.
	AppTokenKey = "bytetrade.io/token"
	// AppRepoURLKey is the annotation key for the chart repository URL.
	AppRepoURLKey = "bytetrade.io/repo-url"
	// AppVersionKey is the annotation key for the chart version.
	AppVersionKey = "bytetrade.io/chart-version"
	// AppMarketSourceKey mirrors the shared market-source annotation key.
	AppMarketSourceKey = constants.AppMarketSourceKey
	// AppInstallSourceKey is the annotation key for the install source.
	AppInstallSourceKey = "bytetrade.io/install-source"
	// AppUninstallAllKey is the annotation key marking an uninstall-all request.
	AppUninstallAllKey = "bytetrade.io/uninstall-all"
	// AppImagesKey is the annotation key listing the app's container images.
	AppImagesKey = "bytetrade.io/images"
)

// Response represents the code for response.
type Response struct {
	Code int32 `json:"code"`
}

// InstallationResponse represents the response for installation.
type InstallationResponse struct {
	Response
	Data InstallationResponseData `json:"data"`
}

// InstallationResponseData represents the installation response uid.
type InstallationResponseData struct {
	UID  string `json:"uid"`
	OpID string `json:"opID"`
}

// DependenciesRespData represents the dependencies of an application.
type DependenciesRespData struct {
	Name    string `yaml:"name" json:"name"`
	Version string `yaml:"version" json:"version"`
	// dependency type: system, application.
	Type string `yaml:"type" json:"type"`
}

// DependenciesResp represents the response for application dependencies.
type DependenciesResp struct {
	Response
	Data []DependenciesRespData `json:"data"`
}

// ReleaseUpgradeResponse represents a response for a release upgrade operation.
type ReleaseUpgradeResponse struct {
	Response
	Data ReleaseUpgradeResponseData `json:"data"`
}

// ReleaseUpgradeResponseData represents a response uid for a release upgrade operation.
type ReleaseUpgradeResponseData struct {
	UID string `json:"uid"`
}

// ReleaseVersionResponse represents a response for retrieving release version.
type ReleaseVersionResponse struct {
	Response
	Data ReleaseVersionData `json:"data"`
}

// ReleaseVersionData contains release version.
type ReleaseVersionData struct {
	Version string `json:"version"`
}

// UserAppsResponse is the response wrapper for a user's app status query.
type UserAppsResponse struct {
	Response
	Data UserAppsStatusRespData `json:"data"`
}

// UserAppsStatusRespData reports the status of a user's apps and the ports
// their built-in entrances are exposed on.
type UserAppsStatusRespData struct {
	User   string        `json:"user"`
	Status string        `json:"status"`
	Ports  UserAppsPorts `json:"ports"`
	Error  string        `json:"error"`
}

// UserAppsPorts holds the exposed ports of the desktop and wizard entrances.
type UserAppsPorts struct {
	Desktop int32 `json:"desktop"`
	Wizard  int32 `json:"wizard"`
}

// RequirementResp represents a response for application requirement.
type RequirementResp struct {
	Response
	Resource string `json:"resource"`
	Message  string `json:"message"`
}

// AppSource describe the source of an application, recommend,model,agent
type AppSource string

const (
	// Market deployed from market.
	Market AppSource = "market"
	// Custom deployed from upload chart by user.
	Custom AppSource = "custom"
	// DevBox deployed from devbox.
	DevBox AppSource = "devbox"
	// System deployed from system.
	System AppSource = "system"
	// Unknown means the source is unknown.
	Unknown AppSource = "unknown"
)

// String returns the string form of the app source.
func (as AppSource) String() string {
	return string(as)
}

// UpgradeRequest represents a request to upgrade an application.
type UpgradeRequest struct {
	CfgURL  string    `json:"cfgURL,omitempty"`
	RepoURL string    `json:"repoURL"`
	Version string    `json:"version"`
	Source  AppSource `json:"source"`
}

// InstallRequest represents a request to install an application.
type InstallRequest struct {
	Dev        bool                    `json:"devMode"`
	RepoURL    string                  `json:"repoUrl"`
	CfgURL     string                  `json:"cfgUrl"`
	Source     AppSource               `json:"source"`
	Images     []Image                 `json:"images"`
	Envs       []sysv1alpha1.AppEnvVar `json:"envs"`
	RawAppName string                  `json:"rawAppName"`
	Title      string                  `json:"title"`
	Entrances  []EntranceClone         `json:"entrances"`
}

// Image identifies a container image and its size in bytes.
type Image struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// UninstallRequest represents a request to uninstall an application.
type UninstallRequest struct {
	All bool `json:"all"`
}

// ManifestRenderRequest carries raw manifest content to be rendered.
type ManifestRenderRequest struct {
	Content string `json:"content"`
}

// ManifestRenderResponse is the response wrapper for manifest rendering.
type ManifestRenderResponse struct {
	Response
	Data ManifestRenderRespData `json:"data"`
}

// ManifestRenderRespData holds the rendered manifest content.
type ManifestRenderRespData struct {
	Content string `json:"content"`
}

// AdminUsernameResponse is the response wrapper for the admin username query.
type AdminUsernameResponse struct {
	Response
	Data AdminUsernameRespData `json:"data"`
}

// AdminUsernameRespData holds a single admin username.
type AdminUsernameRespData struct {
	Username string `json:"username"`
}

// AdminListResponse is the response wrapper listing all admin usernames.
type AdminListResponse struct {
	Response
	Data []string `json:"data"`
}

// ResponseWithMsg represents a response with an additional message.
type ResponseWithMsg struct {
	Response
	Message string `json:"message"`
}

// ImageInfoRequest asks for inspection details of an app's images.
type ImageInfoRequest struct {
	AppName string      `json:"name"`
	Images  []ImageInfo `json:"images"`
}

// ImageInfo pairs an image name with its inspection results.
type ImageInfo struct {
	ImageName string        `json:"name"`
	InfoV2    []ImageInfoV2 `json:"image_info_v2"`
}

// ImageInfoV2 mirrors the fields of a container image inspection
// (skopeo-style output); JSON keys are capitalized to match that format.
type ImageInfoV2 struct {
	Tag           string                         `json:"Tag"`
	Created       string                         `json:"Created"`
	DockerVersion string                         `json:"DockerVersion"`
	Labels        map[string]string              `json:"Labels"`
	Architecture  string                         `json:"Architecture"`
	Variant       string                         `json:"Variant"`
	Os            string                         `json:"Os"`
	Layers        []string                       `json:"Layers"` // Array of layer digests
	LayersData    []imagetypes.ImageInspectLayer `json:"LayersData"`
	Env           []string                       `json:"Env"`
	Author        string                         `json:"Author"`
}

// Check type discriminators carried in FailedCheckResponseData.Type.
// NOTE(review): values use inconsistent casing ("appenv" vs "appEntrance")
// — confirm both match what clients expect before normalizing.
var (
	CheckTypeAppEnv      = "appenv"
	CheckTypeAppEntrance = "appEntrance"
)

// FailedCheckResponse is returned when a pre-deploy check fails.
type FailedCheckResponse struct {
	Code int                     `json:"code"`
	Data FailedCheckResponseData `json:"data"`
}

// FailedCheckResponseData wraps a check-type tag plus the type-specific
// result payload.
// NOTE(review): encoding/json does not support the ",inline" option, so
// Data marshals under the key "Data" — confirm that is the intended wire
// format.
type FailedCheckResponseData struct {
	Type string `json:"type"`
	Data any    `json:",inline"`
}

// AppEnvCheckResult reports env vars that failed validation before deploy.
type AppEnvCheckResult struct {
	MissingValues []sysv1alpha1.AppEnvVar `json:"missingValues"`
	MissingRefs   []sysv1alpha1.AppEnvVar `json:"missingRefs"`
	InvalidValues []sysv1alpha1.AppEnvVar `json:"invalidValues"`
}

// CanDeployResponse is the response wrapper for a deployability query.
type CanDeployResponse struct {
	Response
	Data CanDeployResponseData `json:"data"`
}

// CanDeployResponseData reports whether the operation may proceed.
type CanDeployResponseData struct {
	CanOp bool `json:"canOp"`
}

// NodeInfo summarizes a node's hardware (CPU, memory, GPUs, CUDA version).
type NodeInfo struct {
	CudaVersion string    `json:"cudaVersion"`
	CPU         []CPUInfo `json:"cpu"`
	Memory      MemInfo   `json:"memory"`
	GPUS        []GPUInfo `json:"gpus"`
}

// CPUInfo describes one CPU package on a node.
type CPUInfo struct {
	CoreNumber int    `json:"coreNumber"`
	Arch       string `json:"arch"`
	Frequency  int    `json:"frequency"`
	Model      string `json:"model"`
	ModelName  string `json:"modelName"`
	Vendor     string `json:"vendor"`
}

// MemInfo describes a node's memory.
// Total is presumably in bytes — confirm against the producer.
type MemInfo struct {
	Total int64 `json:"total"`
}

// GPUInfo describes one GPU on a node.
type GPUInfo struct {
	Vendor       string `json:"vendor"`
	Architecture string `json:"arch"`
	Model        string `json:"model"`
	Memory       int64  `json:"memory"`
	ModelName    string `json:"modelName"`
}

// EntranceClone describes an app entrance in install/check payloads.
type EntranceClone struct {
	Name    string `json:"name,omitempty"`
	Title   string `json:"title"`
	Message string `json:"message,omitempty"`
}

// AppTitle carries an app title and the outcome of its validation.
type AppTitle struct {
	Title   string `json:"title"`
	IsValid bool   `json:"isValid"`
	Message string `json:"message"`
}

// AppEntranceCheckResult reports entrances that failed validation before
// deploy, plus the title validation result.
type AppEntranceCheckResult struct {
	MissingValues   []EntranceClone `json:"missingValues"`
	InvalidValues   []EntranceClone `json:"invalidValues"`
	TitleValidation AppTitle        `json:"titleValidation"`
}
|
||||
73
framework/app-service/pkg/apiserver/api/utils.go
Normal file
73
framework/app-service/pkg/apiserver/api/utils.go
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
// Avoid emitting errors that look like valid HTML. Quotes are okay.
// As written the replacer mapped each character to itself (an HTML-entity
// unescaping corruption); the replacements must be the escaped entities.
var sanitizer = strings.NewReplacer(`&`, "&amp;", `<`, "&lt;", `>`, "&gt;")
|
||||
|
||||
// HandleInternalError writes http.StatusInternalServerError and log error.
func HandleInternalError(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusInternalServerError, response, req, err)
}

// HandleBadRequest writes http.StatusBadRequest and log error.
func HandleBadRequest(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusBadRequest, response, req, err)
}

// HandleNotFound writes http.StatusNotFound and log error.
func HandleNotFound(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusNotFound, response, req, err)
}

// HandleForbidden writes http.StatusForbidden and log error.
func HandleForbidden(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusForbidden, response, req, err)
}

// HandleUnauthorized writes http.StatusUnauthorized and log error.
func HandleUnauthorized(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusUnauthorized, response, req, err)
}

// HandleTooManyRequests writes http.StatusTooManyRequests and log error.
func HandleTooManyRequests(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusTooManyRequests, response, req, err)
}

// HandleConflict writes http.StatusConflict and log error.
func HandleConflict(response *restful.Response, req *restful.Request, err error) {
	handle(http.StatusConflict, response, req, err)
}

// HandleFailedCheck writes a 422 Unprocessable Entity response whose body
// carries the check type and the type-specific check result.
// The WriteHeaderAndEntity error is ignored: the response is already being
// written, so there is nothing further to report to the client.
func HandleFailedCheck(response *restful.Response, checkType string, checkResult any, code int) {
	response.WriteHeaderAndEntity(http.StatusUnprocessableEntity, FailedCheckResponse{Code: code, Data: FailedCheckResponseData{Type: checkType, Data: checkResult}})
}

// HandleError handles the given error by determining the appropriate HTTP status code and performing error handling logic.
// Kubernetes API errors keep their own status code, restful service errors
// keep theirs, and anything else maps to 500.
// NOTE(review): a wrapped error (fmt.Errorf with %w) will not match this
// type switch — confirm callers pass the raw error.
func HandleError(response *restful.Response, req *restful.Request, err error) {
	var statusCode int
	switch t := err.(type) {
	case errors.APIStatus:
		statusCode = int(t.Status().Code)
	case restful.ServiceError:
		statusCode = t.Code
	default:
		statusCode = http.StatusInternalServerError
	}
	handle(statusCode, response, req, err)
}

// handle logs the error together with the call site of the exported
// Handle* wrapper (runtime.Caller(2) skips handle itself and the wrapper),
// then writes the sanitized error message with the given status code.
func handle(statusCode int, response *restful.Response, req *restful.Request, err error) {
	_, fn, line, _ := runtime.Caller(2)
	ctrl.Log.Error(err, "response error", "func", fn, "line", line)
	http.Error(response, sanitizer.Replace(err.Error()), statusCode)
}
|
||||
155
framework/app-service/pkg/apiserver/apiserver.go
Normal file
155
framework/app-service/pkg/apiserver/apiserver.go
Normal file
|
|
@ -0,0 +1,155 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
|
||||
restfulspec "github.com/emicklei/go-restful-openapi/v2"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"github.com/go-openapi/spec"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
const (
	// Default on-disk locations for the webhook TLS key pair, used unless
	// both environment overrides below are set.
	defaultCertPath = "/etc/certs/server.crt"
	defaultKeyPath  = "/etc/certs/server.key"
	// Environment variable names that override the cert/key paths.
	tlsCertEnv = "WEBHOOK_TLS_CERT"
	tlsKeyEnv  = "WEBHOOK_TLS_KEY"
)
|
||||
|
||||
// apiHandler is the package-level request handler, populated in
// (*APIServer).PrepareRun before any route is served.
var apiHandler *Handler
|
||||
|
||||
// APIServer represents an API server for system.
// It serves the same restful container over plain HTTP (Server) and
// TLS (SSLServer, used by the admission webhooks).
type APIServer struct {
	Server    *http.Server
	SSLServer *http.Server

	// RESTful Server: the go-restful container shared by both listeners.
	container *restful.Container

	// serverCtx drives background goroutines and triggers shutdown when done.
	serverCtx context.Context
}
|
||||
|
||||
// New returns an APIServer.
|
||||
func New(ctx context.Context) (*APIServer, error) {
|
||||
server := &http.Server{
|
||||
Addr: constants.APIServerListenAddress,
|
||||
}
|
||||
sslServer := &http.Server{
|
||||
Addr: constants.WebhookServerListenAddress,
|
||||
}
|
||||
|
||||
return &APIServer{
|
||||
Server: server,
|
||||
SSLServer: sslServer,
|
||||
container: restful.NewContainer(),
|
||||
serverCtx: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PrepareRun do prepares for API server.
// It configures the restful container (logging filter, curly router, panic
// recovery), builds the package-level apiHandler, starts its op controller,
// waits for informer caches to sync, registers all web services, installs the
// OpenAPI docs endpoint, and finally attaches the container to both the plain
// and TLS servers. Must be called before Run.
func (s *APIServer) PrepareRun(ksHost string, kubeConfig *rest.Config, client client.Client, stopCh <-chan struct{}) (err error) {
	s.container.Filter(logRequestAndResponse)
	s.container.Router(restful.CurlyRouter{})
	s.container.RecoverHandler(func(panicReason interface{}, httpWriter http.ResponseWriter) {
		logStackOnRecover(panicReason, httpWriter)
	})

	// use the server context for goroutine in background
	apiHandlerBuilder := &handlerBuilder{}
	apiHandlerBuilder.WithContext(s.serverCtx).
		WithKubesphereConfig(ksHost).
		WithKubernetesConfig(kubeConfig).
		WithCtrlClient(client).
		WithAppInformer()
	// Populates the package-level apiHandler used by the route handlers.
	apiHandler, err = apiHandlerBuilder.Build()
	if err != nil {
		return err
	}
	// Background operation queue; lifetime bound to the builder's context.
	go apiHandler.opController.run()

	// Blocks until the app/appmgr informer caches have synced.
	err = apiHandler.Run(stopCh)
	if err != nil {
		klog.Infof("wait for cache sync failed %v", err)
		return err
	}
	err = addServiceToContainer(s.container, apiHandler)
	if err != nil {
		return err
	}

	s.installAPIDocs()

	// Both listeners serve the same container.
	s.Server.Handler = s.container
	s.SSLServer.Handler = s.container

	return nil
}
|
||||
|
||||
// Run running a server.
|
||||
func (s *APIServer) Run() error {
|
||||
shutdownCtx, cancel := context.WithTimeout(s.serverCtx, 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
<-s.serverCtx.Done()
|
||||
_ = s.Server.Shutdown(shutdownCtx)
|
||||
_ = s.SSLServer.Shutdown(shutdownCtx)
|
||||
ctrl.Log.Info("Shutdown apiserver for app-service")
|
||||
}()
|
||||
|
||||
go func() {
|
||||
tlsCert, tlsKey := defaultCertPath, defaultKeyPath
|
||||
if os.Getenv(tlsCertEnv) != "" && os.Getenv(tlsKeyEnv) != "" {
|
||||
tlsCert, tlsKey = os.Getenv(tlsCertEnv), os.Getenv(tlsKeyEnv)
|
||||
}
|
||||
ctrl.Log.Info("Starting webhook server for app-service", "listen", constants.WebhookServerListenAddress)
|
||||
if err := s.SSLServer.ListenAndServeTLS(tlsCert, tlsKey); err != nil {
|
||||
ctrl.Log.Error(err, "Failed to start webhook server for app-service")
|
||||
}
|
||||
}()
|
||||
ctrl.Log.Info("Starting server for app-service", "listen", constants.APIServerListenAddress)
|
||||
|
||||
return s.Server.ListenAndServe()
|
||||
}
|
||||
|
||||
// installAPIDocs exposes an OpenAPI (Swagger) document for every web service
// currently registered on the container at /app-service/v1/apidocs.json.
// Must run after all services are added, since only registered services are
// included in the spec.
func (s *APIServer) installAPIDocs() {
	config := restfulspec.Config{
		WebServices:                   s.container.RegisteredWebServices(), // you control what services are visible
		APIPath:                       "/app-service/v1/apidocs.json",
		PostBuildSwaggerObjectHandler: enrichSwaggerObject}
	s.container.Add(restfulspec.NewOpenAPIService(config))
}
|
||||
|
||||
// enrichSwaggerObject fills in the static metadata (title, contact, license,
// version, tags) of the generated Swagger document. Invoked by restfulspec as
// the PostBuildSwaggerObjectHandler.
func enrichSwaggerObject(swo *spec.Swagger) {
	swo.Info = &spec.Info{
		InfoProps: spec.InfoProps{
			Title:       "app-service",
			Description: "application service, running in background",
			Contact: &spec.ContactInfo{
				ContactInfoProps: spec.ContactInfoProps{
					Name:  "bytetrade",
					Email: "dev@bytetrade.io",
					URL:   "http://bytetrade.io",
				},
			},
			License: &spec.License{
				LicenseProps: spec.LicenseProps{
					Name: "Apache License 2.0",
					URL:  "http://www.apache.org/licenses/LICENSE-2.0",
				},
			},
			Version: "0.1.0",
		},
	}
	swo.Tags = []spec.Tag{{TagProps: spec.TagProps{
		Name:        "app-service",
		Description: "Web 3 OS app-service"}}}
}
|
||||
139
framework/app-service/pkg/apiserver/filters.go
Normal file
139
framework/app-service/pkg/apiserver/filters.go
Normal file
|
|
@ -0,0 +1,139 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/client/clientset"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
|
||||
func logStackOnRecover(panicReason interface{}, w http.ResponseWriter) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
|
||||
for i := 2; ; i++ {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
|
||||
}
|
||||
ctrl.Log.Error(errors.New(buffer.String()), "panic error")
|
||||
|
||||
headers := http.Header{}
|
||||
if ct := w.Header().Get("Content-Type"); len(ct) > 0 {
|
||||
headers.Set("Accept", ct)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
w.Write([]byte("Internal server error"))
|
||||
}
|
||||
|
||||
func logRequestAndResponse(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
|
||||
start := time.Now()
|
||||
chain.ProcessFilter(req, resp)
|
||||
|
||||
// Always log error response
|
||||
if resp.StatusCode() != http.StatusOK {
|
||||
ctrl.Log.Info("request",
|
||||
"IP",
|
||||
utils.RemoteIP(req.Request),
|
||||
"method",
|
||||
req.Request.Method,
|
||||
"URL",
|
||||
req.Request.URL,
|
||||
"proto",
|
||||
req.Request.Proto,
|
||||
"code",
|
||||
resp.StatusCode(),
|
||||
"length",
|
||||
resp.ContentLength(),
|
||||
"timestamp",
|
||||
time.Since(start)/time.Millisecond,
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// createClientSet is a per-request filter that builds a fresh ClientSet from
// the handler's kube config and stashes it on the request under
// KubeSphereClientAttribute for downstream handlers to retrieve.
func (h *Handler) createClientSet(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	client, err := clientset.New(h.kubeConfig)
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}

	req.SetAttribute(constants.KubeSphereClientAttribute, client)
	chain.ProcessFilter(req, resp)
}
|
||||
|
||||
func (h *Handler) authenticate(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
|
||||
// Ignore uris, because do not need authentication
|
||||
trustPaths := []string{
|
||||
"/app-service/v1/apidocs.json",
|
||||
"/app-service/v1/sandbox/inject",
|
||||
"/app-service/v1/appns/validate",
|
||||
"/app-service/v1/gpulimit/inject",
|
||||
"/app-service/v1/backup/new",
|
||||
"/app-service/v1/backup/finish",
|
||||
"/app-service/v1/metrics/highload",
|
||||
"/app-service/v1/metrics/user/highload",
|
||||
"/app-service/v1/user-apps/",
|
||||
"/app-service/v1/apidocs.json",
|
||||
"/app-service/v1/recommenddev/",
|
||||
"/app-service/v1/provider-registry/validate",
|
||||
"/app-service/v1/pods/kubelet/eviction",
|
||||
"/app-service/v1/workflow/inject",
|
||||
"/app-service/v1/runasuser/inject",
|
||||
"/app-service/v1/terminus/version",
|
||||
"/app-service/v1/app-label/inject",
|
||||
"/app-service/v1/apps/image-info",
|
||||
"/app-service/v1/all/apps",
|
||||
"/app-service/v1/apps/oamvalues",
|
||||
"/app-service/v1/users/admin/username",
|
||||
"/app-service/v1/user/validate",
|
||||
"/app-service/v1/applicationmanager/inject",
|
||||
"/app-service/v1/applicationmanager/validate",
|
||||
"/app-service/v1/users/admins",
|
||||
"/app-service/v1/middlewares/status",
|
||||
"/app-service/v1/workflow/validate",
|
||||
"/app-service/v1/all/appmanagers",
|
||||
"/app-service/v1/cluster/node_info",
|
||||
}
|
||||
|
||||
needAuth := true
|
||||
func() {
|
||||
for _, p := range trustPaths {
|
||||
switch {
|
||||
case req.Request.URL.Path == p:
|
||||
needAuth = false
|
||||
return
|
||||
case p[len(p)-1] == '/':
|
||||
if len(req.Request.URL.Path) > len(p) && req.Request.URL.Path[:len(p)] == p {
|
||||
needAuth = false
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if needAuth {
|
||||
username := req.Request.Header.Get(constants.BflUserKey)
|
||||
if username == "" {
|
||||
api.HandleUnauthorized(resp, req, errors.New("no authentication info error"))
|
||||
return
|
||||
}
|
||||
|
||||
req.SetAttribute(constants.UserContextAttribute, username)
|
||||
}
|
||||
|
||||
chain.ProcessFilter(req, resp)
|
||||
}
|
||||
169
framework/app-service/pkg/apiserver/handler.go
Normal file
169
framework/app-service/pkg/apiserver/handler.go
Normal file
|
|
@ -0,0 +1,169 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/generated/clientset/versioned"
|
||||
"bytetrade.io/web3os/app-service/pkg/generated/informers/externalversions"
|
||||
lister_v1alpha1 "bytetrade.io/web3os/app-service/pkg/generated/listers/app.bytetrade.io/v1alpha1"
|
||||
|
||||
// upgrade removed from direct usage in handlers
|
||||
"bytetrade.io/web3os/app-service/pkg/users/userspace/v1"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
"bytetrade.io/web3os/app-service/pkg/webhook"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// Handler include several fields that used for managing interactions with
// associated services.
type Handler struct {
	kubeHost         string             // kubesphere host passed in at build time
	serviceCtx       context.Context    // lifetime context shared with background work
	userspaceManager *userspace.Manager // per-user userspace management
	kubeConfig       *rest.Config       // helm's kubeConfig. TODO: insecure
	sidecarWebhook   *webhook.Webhook   // admission webhook registrar/handler
	ctrlClient       client.Client      // controller-runtime client for CRD access
	// informer and derived listers/sync funcs for Application and
	// ApplicationManager resources (10-minute resync, see WithAppInformer).
	informer     externalversions.SharedInformerFactory
	appLister    lister_v1alpha1.ApplicationLister
	appmgrLister lister_v1alpha1.ApplicationManagerLister
	appSynced    cache.InformerSynced
	appmgrSynced cache.InformerSynced
	opController *OpController // background operation queue (started in PrepareRun)
}
|
||||
|
||||
// handlerBuilder accumulates the dependencies needed to construct a Handler
// via its chainable With* methods, finalized by Build.
type handlerBuilder struct {
	ctx        context.Context
	ksHost     string
	kubeConfig *rest.Config
	ctrlClient client.Client
	informer   externalversions.SharedInformerFactory
}
|
||||
|
||||
// WithKubesphereConfig sets the kubesphere host and returns the builder for chaining.
func (b *handlerBuilder) WithKubesphereConfig(ksHost string) *handlerBuilder {
	b.ksHost = ksHost
	return b
}
|
||||
|
||||
// WithContext sets the lifetime context and returns the builder for chaining.
func (b *handlerBuilder) WithContext(ctx context.Context) *handlerBuilder {
	b.ctx = ctx
	return b
}
|
||||
|
||||
// WithKubernetesConfig sets the kubernetes rest config and returns the builder for chaining.
func (b *handlerBuilder) WithKubernetesConfig(config *rest.Config) *handlerBuilder {
	b.kubeConfig = config
	return b
}
|
||||
|
||||
// WithCtrlClient sets the controller-runtime client and returns the builder for chaining.
func (b *handlerBuilder) WithCtrlClient(client client.Client) *handlerBuilder {
	b.ctrlClient = client
	return b
}
|
||||
|
||||
func (b *handlerBuilder) WithAppInformer() *handlerBuilder {
|
||||
appClient, err := versioned.NewForConfig(b.kubeConfig)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
informer := externalversions.NewSharedInformerFactory(appClient, 10*time.Minute)
|
||||
b.informer = informer
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *handlerBuilder) Build() (*Handler, error) {
|
||||
wh, err := webhook.New(b.kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = wh.CreateOrUpdateSandboxMutatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateAppNamespaceValidatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateGpuLimitMutatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateProviderRegistryValidatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.DeleteKubeletEvictionValidatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateCronWorkflowMutatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateRunAsUserMutatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateAppLabelMutatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateUserValidatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateApplicationManagerMutatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateApplicationManagerValidatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = wh.CreateOrUpdateArgoResourceValidatingWebhook()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Handler{
|
||||
kubeHost: b.ksHost,
|
||||
serviceCtx: b.ctx,
|
||||
kubeConfig: b.kubeConfig,
|
||||
userspaceManager: userspace.NewManager(b.ctx),
|
||||
sidecarWebhook: wh,
|
||||
ctrlClient: b.ctrlClient,
|
||||
informer: b.informer,
|
||||
appLister: b.informer.App().V1alpha1().Applications().Lister(),
|
||||
appmgrLister: b.informer.App().V1alpha1().ApplicationManagers().Lister(),
|
||||
appSynced: b.informer.App().V1alpha1().Applications().Informer().HasSynced,
|
||||
appmgrSynced: b.informer.App().V1alpha1().ApplicationManagers().Informer().HasSynced,
|
||||
opController: NewQueue(b.ctx),
|
||||
}, err
|
||||
|
||||
}
|
||||
|
||||
// Run starts the shared informer factory and blocks until the Application and
// ApplicationManager caches have synced (or stopCh closes, in which case it
// returns an error).
func (h *Handler) Run(stopCh <-chan struct{}) error {
	h.informer.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, h.appSynced, h.appmgrSynced) {
		return fmt.Errorf("failed to wait for application caches to sync")
	}
	return nil
}
|
||||
|
||||
// GetServerServiceAccountToken returns the bearer token of the server's own
// kube config (the in-cluster service account token when running in-cluster).
func (h *Handler) GetServerServiceAccountToken() string {
	return h.kubeConfig.BearerToken
}
|
||||
|
||||
// GetUserServiceAccountToken returns a service-account token scoped to the
// given user, by delegating to utils.GetUserServiceAccountToken with a fresh
// kube client built from the handler's config.
func (h *Handler) GetUserServiceAccountToken(ctx context.Context, user string) (string, error) {
	kubeClient, err := kubernetes.NewForConfig(h.kubeConfig)
	if err != nil {
		klog.Errorf("Failed to create kube client: %v", err)
		return "", err
	}
	return utils.GetUserServiceAccountToken(ctx, kubeClient, user)
}
|
||||
993
framework/app-service/pkg/apiserver/handler_app.go
Normal file
993
framework/app-service/pkg/apiserver/handler_app.go
Normal file
|
|
@ -0,0 +1,993 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/appcfg"
|
||||
"bytetrade.io/web3os/app-service/pkg/appinstaller"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/client/clientset"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/kubesphere"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/users/userspace"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// status returns the installation status of a single app (path parameter)
// owned by the authenticated user, read from its ApplicationManager CR.
// Responds 404 if the ApplicationManager does not exist.
func (h *Handler) status(req *restful.Request, resp *restful.Response) {
	app := req.PathParameter(ParamAppName)
	owner := req.Attribute(constants.UserContextAttribute).(string)

	name, err := apputils.FmtAppMgrName(app, owner, "")
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}

	var am v1alpha1.ApplicationManager
	e := h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
	if e != nil {
		if apierrors.IsNotFound(e) {
			api.HandleNotFound(resp, req, e)
			return
		}
		api.HandleError(resp, req, e)
		return
	}
	// Timestamps in the response reflect "now", not the CR's own times.
	now := metav1.Now()
	sts := appinstaller.Status{
		Name:              am.Spec.AppName,
		AppID:             v1alpha1.AppName(am.Spec.AppName).GetAppID(),
		Namespace:         am.Spec.AppNamespace,
		CreationTimestamp: now,
		Source:            am.Spec.Source,
		AppStatus: v1alpha1.ApplicationStatus{
			State:      am.Status.State.String(),
			Progress:   am.Status.Progress,
			StatusTime: &now,
			UpdateTime: &now,
		},
	}

	resp.WriteAsJson(sts)
}
|
||||
|
||||
// appsStatus lists installation statuses of the authenticated user's apps.
// Query parameters:
//   - state: optional "|"-separated list of states to include (defaults to all)
//   - issysapp: when "true", only system apps are returned
//
// Results are sorted by creation time descending.
func (h *Handler) appsStatus(req *restful.Request, resp *restful.Response) {
	owner := req.Attribute(constants.UserContextAttribute).(string)
	isSysApp := req.QueryParameter("issysapp")
	state := req.QueryParameter("state")
	ss := make([]string, 0)
	if state != "" {
		ss = strings.Split(state, "|")
	}
	all := make([]string, 0)
	for _, a := range appstate.All {
		all = append(all, a.String())
	}
	// Default to every known state; replaced by the explicit filter if given.
	stateSet := sets.NewString(all...)
	if len(ss) > 0 {
		stateSet = sets.String{}
	}
	for _, s := range ss {
		stateSet.Insert(s)
	}

	// filter by application's owner
	filteredApps := make([]appinstaller.Status, 0)

	appAms, err := h.appmgrLister.List(labels.Everything())
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}
	for _, am := range appAms {
		if am.Spec.AppOwner == owner {
			if !stateSet.Has(am.Status.State.String()) {
				continue
			}
			if len(isSysApp) > 0 && isSysApp == "true" && !userspace.IsSysApp(am.Spec.AppName) {
				continue
			}
			now := metav1.Now()
			status := appinstaller.Status{
				Name:              am.Spec.AppName,
				AppID:             v1alpha1.AppName(am.Spec.AppName).GetAppID(),
				Namespace:         am.Spec.AppNamespace,
				CreationTimestamp: now,
				Source:            am.Spec.Source,
				AppStatus: v1alpha1.ApplicationStatus{
					State:      am.Status.State.String(),
					Progress:   am.Status.Progress,
					StatusTime: &now,
					UpdateTime: &now,
				},
			}

			filteredApps = append(filteredApps, status)
		}
	}

	// sort by create time desc
	sort.Slice(filteredApps, func(i, j int) bool {
		return filteredApps[j].CreationTimestamp.Before(&filteredApps[i].CreationTimestamp)
	})

	resp.WriteAsJson(map[string]interface{}{"result": filteredApps})
}
|
||||
|
||||
// operate returns the current/last operation of a single app owned by the
// authenticated user, read from its ApplicationManager CR. Responds 404 if
// the ApplicationManager does not exist.
func (h *Handler) operate(req *restful.Request, resp *restful.Response) {
	app := req.PathParameter(ParamAppName)
	owner := req.Attribute(constants.UserContextAttribute).(string)

	var am v1alpha1.ApplicationManager
	name, err := apputils.FmtAppMgrName(app, owner, "")
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}
	err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
	if err != nil {
		if apierrors.IsNotFound(err) {
			api.HandleNotFound(resp, req, err)
			return
		}
		api.HandleError(resp, req, err)
		return
	}
	operate := appinstaller.Operate{
		AppName:           am.Spec.AppName,
		AppNamespace:      am.Spec.AppNamespace,
		AppOwner:          am.Spec.AppOwner,
		OpType:            am.Status.OpType,
		OpID:              am.Status.OpID,
		ResourceType:      am.Spec.Type.String(),
		State:             am.Status.State,
		Message:           am.Status.Message,
		CreationTimestamp: am.CreationTimestamp,
		Source:            am.Spec.Source,
		Progress:          am.Status.Progress,
	}

	resp.WriteAsJson(operate)
}
|
||||
|
||||
// appsOperate lists the current/last operation of every App-type
// ApplicationManager owned by the authenticated user, sorted by creation time
// descending. Non-App resource types (middleware, etc.) are skipped.
func (h *Handler) appsOperate(req *restful.Request, resp *restful.Response) {
	owner := req.Attribute(constants.UserContextAttribute).(string)

	ams, err := h.appmgrLister.List(labels.Everything())
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}

	// filter by application's owner
	filteredOperates := make([]appinstaller.Operate, 0)
	for _, am := range ams {
		if am.Spec.Type != v1alpha1.App {
			continue
		}

		if am.Spec.AppOwner == owner {
			operate := appinstaller.Operate{
				AppName:           am.Spec.AppName,
				AppNamespace:      am.Spec.AppNamespace,
				AppOwner:          am.Spec.AppOwner,
				State:             am.Status.State,
				OpType:            am.Status.OpType,
				OpID:              am.Status.OpID,
				ResourceType:      am.Spec.Type.String(),
				Message:           am.Status.Message,
				CreationTimestamp: am.CreationTimestamp,
				Source:            am.Spec.Source,
				Progress:          am.Status.Progress,
			}
			filteredOperates = append(filteredOperates, operate)
		}
	}

	// sort by create time desc
	sort.Slice(filteredOperates, func(i, j int) bool {
		return filteredOperates[j].CreationTimestamp.Before(&filteredOperates[i].CreationTimestamp)
	})

	resp.WriteAsJson(map[string]interface{}{"result": filteredOperates})
}
|
||||
|
||||
// operateHistory returns the recorded operation history (OpRecords) of a
// single app owned by the authenticated user. Responds 404 if the
// ApplicationManager does not exist. Records are returned in stored order.
func (h *Handler) operateHistory(req *restful.Request, resp *restful.Response) {
	app := req.PathParameter(ParamAppName)
	owner := req.Attribute(constants.UserContextAttribute).(string)

	var am v1alpha1.ApplicationManager
	name, err := apputils.FmtAppMgrName(app, owner, "")
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}
	key := types.NamespacedName{Name: name}
	err = h.ctrlClient.Get(req.Request.Context(), key, &am)

	if err != nil {
		if apierrors.IsNotFound(err) {
			api.HandleNotFound(resp, req, err)
			return
		}
		api.HandleError(resp, req, err)
		return
	}
	ops := make([]appinstaller.OperateHistory, 0, len(am.Status.OpRecords))
	for _, r := range am.Status.OpRecords {
		op := appinstaller.OperateHistory{
			AppName:      am.Spec.AppName,
			AppNamespace: am.Spec.AppNamespace,
			AppOwner:     am.Spec.AppOwner,
			ResourceType: am.Spec.Type.String(),
			OpRecord: v1alpha1.OpRecord{
				OpType:    r.OpType,
				OpID:      r.OpID,
				Message:   r.Message,
				Source:    r.Source,
				Version:   r.Version,
				Status:    r.Status,
				StateTime: r.StateTime,
			},
		}
		ops = append(ops, op)
	}

	resp.WriteAsJson(map[string]interface{}{"result": ops})
}
|
||||
|
||||
func (h *Handler) allAppManagers(req *restful.Request, resp *restful.Response) {
|
||||
ams, err := h.appmgrLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get appmgr list %v", err)
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
ret := make([]v1alpha1.ApplicationManager, 0, len(ams))
|
||||
for _, am := range ams {
|
||||
if am.Spec.Type != v1alpha1.App && am.Spec.Type != v1alpha1.Middleware {
|
||||
continue
|
||||
}
|
||||
if userspace.IsSysApp(am.Spec.AppName) {
|
||||
continue
|
||||
}
|
||||
am.ManagedFields = make([]metav1.ManagedFieldsEntry, 0)
|
||||
am.Annotations[api.AppTokenKey] = ""
|
||||
am.Annotations[constants.ApplicationImageLabel] = ""
|
||||
am.Spec.Config = ""
|
||||
am.Status.OpRecords = make([]v1alpha1.OpRecord, 0)
|
||||
ret = append(ret, *am.DeepCopy())
|
||||
}
|
||||
resp.WriteAsJson(ret)
|
||||
}
|
||||
|
||||
func (h *Handler) allOperateHistory(req *restful.Request, resp *restful.Response) {
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
source := req.QueryParameter("source")
|
||||
resourceType := req.QueryParameter("resourceType")
|
||||
|
||||
filteredSources := constants.Sources
|
||||
filteredResourceTypes := constants.ResourceTypes
|
||||
if len(source) > 0 {
|
||||
filteredSources = sets.String{}
|
||||
for _, s := range strings.Split(source, "|") {
|
||||
filteredSources.Insert(s)
|
||||
}
|
||||
}
|
||||
if len(resourceType) > 0 {
|
||||
filteredResourceTypes = sets.String{}
|
||||
for _, s := range strings.Split(resourceType, "|") {
|
||||
filteredResourceTypes.Insert(s)
|
||||
}
|
||||
}
|
||||
|
||||
ams, err := h.appmgrLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
ops := make([]appinstaller.OperateHistory, 0)
|
||||
|
||||
for _, am := range ams {
|
||||
if !filteredResourceTypes.Has(am.Spec.Type.String()) {
|
||||
continue
|
||||
}
|
||||
if am.Spec.AppOwner != owner || userspace.IsSysApp(am.Spec.AppName) {
|
||||
continue
|
||||
}
|
||||
for _, r := range am.Status.OpRecords {
|
||||
if !filteredSources.Has(r.Source) {
|
||||
continue
|
||||
}
|
||||
op := appinstaller.OperateHistory{
|
||||
AppName: am.Spec.AppName,
|
||||
AppNamespace: am.Spec.AppNamespace,
|
||||
AppOwner: am.Spec.AppOwner,
|
||||
ResourceType: am.Spec.Type.String(),
|
||||
OpRecord: v1alpha1.OpRecord{
|
||||
OpType: r.OpType,
|
||||
Message: r.Message,
|
||||
Source: r.Source,
|
||||
Version: r.Version,
|
||||
Status: r.Status,
|
||||
StateTime: r.StateTime,
|
||||
},
|
||||
}
|
||||
ops = append(ops, op)
|
||||
}
|
||||
}
|
||||
sort.Slice(ops, func(i, j int) bool {
|
||||
return ops[j].StateTime.Before(ops[i].StateTime)
|
||||
})
|
||||
|
||||
resp.WriteAsJson(map[string]interface{}{"result": ops})
|
||||
}
|
||||
|
||||
func (h *Handler) getApp(req *restful.Request, resp *restful.Response) {
|
||||
client := req.Attribute(constants.KubeSphereClientAttribute).(*clientset.ClientSet)
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
appName := req.PathParameter(ParamAppName)
|
||||
name, err := apputils.FmtAppMgrName(appName, owner, "")
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
var app *v1alpha1.Application
|
||||
|
||||
app, err = client.AppClient.AppV1alpha1().Applications().Get(req.Request.Context(), name, metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
am, err := client.AppClient.AppV1alpha1().ApplicationManagers().Get(req.Request.Context(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
app.Status.State = am.Status.State.String()
|
||||
|
||||
resp.WriteAsJson(app)
|
||||
}
|
||||
|
||||
// apps lists the authenticated user's applications, merging two sources:
// synthetic Application objects built from ApplicationManagers (covers apps
// still pending/installing, i.e. without a real Application CR yet) and the
// real Application CRs (which supply settings and entrance statuses).
// Query parameters:
//   - state: optional "|"-separated state filter (applies to the appmgr pass)
//   - issysapp: when "true", restricts the Application-CR pass to system apps
//     and skips the appmgr-derived pass entirely
func (h *Handler) apps(req *restful.Request, resp *restful.Response) {
	owner := req.Attribute(constants.UserContextAttribute).(string)
	isSysApp := req.QueryParameter("issysapp")
	state := req.QueryParameter("state")

	ss := make([]string, 0)
	if state != "" {
		ss = strings.Split(state, "|")
	}
	all := make([]string, 0)
	for _, a := range appstate.All {
		all = append(all, a.String())
	}
	// Default to all states; replaced by the explicit filter if provided.
	stateSet := sets.NewString(all...)
	if len(ss) > 0 {
		stateSet = sets.String{}
	}
	for _, s := range ss {
		stateSet.Insert(s)
	}
	filteredApps := make([]v1alpha1.Application, 0)
	appsMap := make(map[string]*v1alpha1.Application)
	appsEntranceMap := make(map[string]*v1alpha1.Application)

	// get pending app's from app managers
	ams, err := h.appmgrLister.List(labels.Everything())
	if err != nil {
		klog.Infof("get app manager list failed %v", err)
		api.HandleError(resp, req, err)
		return
	}
	for _, am := range ams {
		if am.Spec.Type != v1alpha1.App {
			continue
		}
		if am.Spec.AppOwner != owner {
			continue
		}
		// System apps never come from this pass (they are taken from the
		// Application CRs below), so skip when only sys apps are requested.
		if len(isSysApp) > 0 && isSysApp == "true" {
			continue
		}
		if userspace.IsSysApp(am.Spec.AppName) {
			continue
		}
		if !stateSet.Has(am.Status.State.String()) {
			continue
		}

		var appconfig appcfg.ApplicationConfig
		err = json.Unmarshal([]byte(am.Spec.Config), &appconfig)
		if err != nil {
			api.HandleError(resp, req, err)
			return
		}
		now := metav1.Now()
		name, _ := apputils.FmtAppMgrName(am.Spec.AppName, owner, appconfig.Namespace)
		// Synthesize an Application from the manager spec + embedded config.
		app := &v1alpha1.Application{
			TypeMeta: metav1.TypeMeta{},
			ObjectMeta: metav1.ObjectMeta{
				Name:              name,
				CreationTimestamp: am.CreationTimestamp,
			},
			Spec: v1alpha1.ApplicationSpec{
				Name:            am.Spec.AppName,
				RawAppName:      am.Spec.RawAppName,
				Appid:           v1alpha1.AppName(am.Spec.AppName).GetAppID(),
				IsSysApp:        v1alpha1.AppName(am.Spec.AppName).IsSysApp(),
				Namespace:       am.Spec.AppNamespace,
				Owner:           owner,
				Entrances:       appconfig.Entrances,
				SharedEntrances: appconfig.SharedEntrances,
				Icon:            appconfig.Icon,
				Settings: map[string]string{
					"title": am.Annotations[constants.ApplicationTitleLabel],
				},
			},
			Status: v1alpha1.ApplicationStatus{
				State:      am.Status.State.String(),
				Progress:   am.Status.Progress,
				StatusTime: &now,
				UpdateTime: &now,
			},
		}
		appsMap[app.Name] = app
	}

	allApps, err := h.appLister.List(labels.Everything())
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}

	for _, a := range allApps {
		if a.Spec.Owner == owner {
			if len(isSysApp) > 0 && isSysApp == "true" && strconv.FormatBool(a.Spec.IsSysApp) != isSysApp {
				continue
			}
			appsEntranceMap[a.Name] = a

			if a.Spec.IsSysApp {
				appsMap[a.Name] = a
				continue
			}
			// Non-sys app with a manager entry: prefer the CR's settings.
			if v, ok := appsMap[a.Name]; ok {
				v.Spec.Settings = a.Spec.Settings
			}
		}
	}
	for _, app := range appsMap {
		if v, ok := appsEntranceMap[app.Name]; ok {
			app.Status.EntranceStatuses = v.Status.EntranceStatuses
		}
		filteredApps = append(filteredApps, *app)
	}

	// sort by creation time ascending (oldest first).
	// NOTE(review): the original comment said "desc", but the comparator is
	// ascending — appsStatus/appsOperate sort descending; confirm which order
	// is intended.
	sort.Slice(filteredApps, func(i, j int) bool {
		return filteredApps[i].CreationTimestamp.Before(&filteredApps[j].CreationTimestamp)
	})

	resp.WriteAsJson(filteredApps)
}
|
||||
|
||||
// pendingOrInstallingApps returns the ApplicationManagers that currently have
// a pending or running task, as reported by apputils.GetPendingOrRunningTask.
func (h *Handler) pendingOrInstallingApps(req *restful.Request, resp *restful.Response) {
	ams, err := apputils.GetPendingOrRunningTask(req.Request.Context())
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}
	resp.WriteAsJson(ams)
}
|
||||
|
||||
func (h *Handler) terminusVersion(req *restful.Request, resp *restful.Response) {
|
||||
terminus, err := utils.GetTerminus(req.Request.Context(), h.ctrlClient)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
resp.WriteAsJson(map[string]interface{}{"version": terminus.Spec.Version})
|
||||
}
|
||||
|
||||
func (h *Handler) nodes(req *restful.Request, resp *restful.Response) {
|
||||
var nodes corev1.NodeList
|
||||
err := h.ctrlClient.List(req.Request.Context(), &nodes, &client.ListOptions{})
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
resp.WriteAsJson(map[string]interface{}{"result": nodes.Items})
|
||||
}
|
||||
|
||||
//func toProcessing(state v1alpha1.ApplicationManagerState) v1alpha1.ApplicationManagerState {
|
||||
// if state == v1alpha1.Installing || state == v1alpha1.Uninstalling ||
|
||||
// state == v1alpha1.Upgrading || state == v1alpha1.Resuming ||
|
||||
// state == v1alpha1.Canceling || state == v1alpha1.Pending {
|
||||
// return v1alpha1.Processing
|
||||
// }
|
||||
// return state
|
||||
//}
|
||||
|
||||
func (h *Handler) operateRecommend(req *restful.Request, resp *restful.Response) {
|
||||
app := req.PathParameter(ParamWorkflowName)
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
var am v1alpha1.ApplicationManager
|
||||
name, err := apputils.FmtAppMgrName(app, owner, "")
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
api.HandleNotFound(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
operate := appinstaller.Operate{
|
||||
AppName: am.Spec.AppName,
|
||||
AppOwner: am.Spec.AppOwner,
|
||||
OpType: am.Status.OpType,
|
||||
ResourceType: am.Spec.Type.String(),
|
||||
State: am.Status.State,
|
||||
Message: am.Status.Message,
|
||||
CreationTimestamp: am.CreationTimestamp,
|
||||
Source: am.Spec.Source,
|
||||
}
|
||||
resp.WriteAsJson(operate)
|
||||
}
|
||||
|
||||
func (h *Handler) operateRecommendList(req *restful.Request, resp *restful.Response) {
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
ams, err := h.appmgrLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
filteredOperates := make([]appinstaller.Operate, 0)
|
||||
for _, am := range ams {
|
||||
if am.Spec.AppOwner == owner && am.Spec.Type == v1alpha1.Recommend {
|
||||
operate := appinstaller.Operate{
|
||||
AppName: am.Spec.AppName,
|
||||
AppOwner: am.Spec.AppOwner,
|
||||
State: am.Status.State,
|
||||
OpType: am.Status.OpType,
|
||||
ResourceType: am.Spec.Type.String(),
|
||||
Message: am.Status.Message,
|
||||
CreationTimestamp: am.CreationTimestamp,
|
||||
Source: am.Spec.Source,
|
||||
}
|
||||
filteredOperates = append(filteredOperates, operate)
|
||||
}
|
||||
}
|
||||
// sort by create time desc
|
||||
sort.Slice(filteredOperates, func(i, j int) bool {
|
||||
return filteredOperates[j].CreationTimestamp.Before(&filteredOperates[i].CreationTimestamp)
|
||||
})
|
||||
|
||||
resp.WriteAsJson(map[string]interface{}{"result": filteredOperates})
|
||||
}
|
||||
|
||||
func (h *Handler) operateRecommendHistory(req *restful.Request, resp *restful.Response) {
|
||||
app := req.PathParameter(ParamWorkflowName)
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
var am v1alpha1.ApplicationManager
|
||||
name, err := apputils.FmtAppMgrName(app, owner, "")
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
key := types.NamespacedName{Name: name}
|
||||
err = h.ctrlClient.Get(req.Request.Context(), key, &am)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
api.HandleNotFound(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
ops := make([]appinstaller.OperateHistory, 0, len(am.Status.OpRecords))
|
||||
for _, r := range am.Status.OpRecords {
|
||||
op := appinstaller.OperateHistory{
|
||||
AppName: am.Spec.AppName,
|
||||
AppNamespace: am.Spec.AppNamespace,
|
||||
AppOwner: am.Spec.AppOwner,
|
||||
ResourceType: am.Spec.Type.String(),
|
||||
|
||||
OpRecord: v1alpha1.OpRecord{
|
||||
OpType: r.OpType,
|
||||
Message: r.Message,
|
||||
Source: r.Source,
|
||||
Version: r.Version,
|
||||
Status: r.Status,
|
||||
StateTime: r.StateTime,
|
||||
},
|
||||
}
|
||||
ops = append(ops, op)
|
||||
}
|
||||
resp.WriteAsJson(map[string]interface{}{"result": ops})
|
||||
}
|
||||
|
||||
// allOperateRecommendHistory returns the recorded operation history of
// every recommend (workflow) owned by the requesting user, flattened
// into a single list and sorted newest-first by record state time.
func (h *Handler) allOperateRecommendHistory(req *restful.Request, resp *restful.Response) {
	owner := req.Attribute(constants.UserContextAttribute).(string)

	ams, err := h.appmgrLister.List(labels.Everything())

	if err != nil {
		api.HandleError(resp, req, err)
		return
	}
	ops := make([]appinstaller.OperateHistory, 0)

	for _, am := range ams {
		// Skip managers that belong to other users, system apps, and
		// anything that is not a recommend.
		if am.Spec.AppOwner != owner || userspace.IsSysApp(am.Spec.AppName) || am.Spec.Type != v1alpha1.Recommend {
			continue
		}
		for _, r := range am.Status.OpRecords {
			op := appinstaller.OperateHistory{
				AppName:      am.Spec.AppName,
				AppNamespace: am.Spec.AppNamespace,
				AppOwner:     am.Spec.AppOwner,
				ResourceType: am.Spec.Type.String(),

				OpRecord: v1alpha1.OpRecord{
					OpType:    r.OpType,
					Message:   r.Message,
					Source:    r.Source,
					Version:   r.Version,
					Status:    r.Status,
					StateTime: r.StateTime,
				},
			}
			ops = append(ops, op)
		}
	}
	// Newest record first.
	sort.Slice(ops, func(i, j int) bool {
		return ops[j].StateTime.Before(ops[i].StateTime)
	})

	resp.WriteAsJson(map[string]interface{}{"result": ops})
}
|
||||
|
||||
// allUsersApps lists applications across all users, merging two
// sources: in-flight entries from the ApplicationManager lister and
// installed Application objects from the app lister. Query parameters:
// "issysapp" filters by the sys-app flag, "state" is a '|'-separated
// list of states (empty means every known state).
func (h *Handler) allUsersApps(req *restful.Request, resp *restful.Response) {
	//owner := req.Attribute(constants.UserContextAttribute).(string)
	isSysApp := req.QueryParameter("issysapp")
	state := req.QueryParameter("state")

	ss := make([]string, 0)
	if state != "" {
		ss = strings.Split(state, "|")
	}
	// Default to the full set of states; narrow it when the caller
	// supplied an explicit "state" filter.
	all := make([]string, 0)
	for _, a := range appstate.All {
		all = append(all, a.String())
	}
	stateSet := sets.NewString(all...)
	if len(ss) > 0 {
		stateSet = sets.String{}
	}
	for _, s := range ss {
		stateSet.Insert(s)
	}

	filteredApps := make([]v1alpha1.Application, 0)
	// appsMap collects the merged result; appsEntranceMap remembers the
	// real Application objects so entrance statuses can be copied over.
	appsMap := make(map[string]*v1alpha1.Application)
	appsEntranceMap := make(map[string]*v1alpha1.Application)
	// get pending app's from app managers
	ams, err := h.appmgrLister.List(labels.Everything())
	if err != nil {
		klog.Error(err)
		api.HandleError(resp, req, err)
		return
	}

	for _, am := range ams {
		if am.Spec.Type != v1alpha1.App {
			continue
		}

		if !stateSet.Has(am.Status.State.String()) {
			continue
		}
		// Managers never describe sys apps, so a "sys apps only" query
		// skips this source entirely.
		if len(isSysApp) > 0 && isSysApp == "true" {
			continue
		}
		if userspace.IsSysApp(am.Spec.AppName) {
			continue
		}

		// Without a stored config there is nothing to render.
		if am.Spec.Config == "" {
			continue
		}

		var appconfig appcfg.ApplicationConfig
		err = json.Unmarshal([]byte(am.Spec.Config), &appconfig)
		if err != nil {
			api.HandleError(resp, req, err)
			return
		}

		// Synthesize an Application view from the manager's spec/config.
		now := metav1.Now()
		app := v1alpha1.Application{
			TypeMeta: metav1.TypeMeta{},
			ObjectMeta: metav1.ObjectMeta{
				Name:              am.Name,
				CreationTimestamp: am.CreationTimestamp,
			},
			Spec: v1alpha1.ApplicationSpec{
				Name:            am.Spec.AppName,
				RawAppName:      am.Spec.RawAppName,
				Appid:           v1alpha1.AppName(am.Spec.AppName).GetAppID(),
				IsSysApp:        v1alpha1.AppName(am.Spec.AppName).IsSysApp(),
				Namespace:       am.Spec.AppNamespace,
				Owner:           am.Spec.AppOwner,
				Entrances:       appconfig.Entrances,
				SharedEntrances: appconfig.SharedEntrances,
				Icon:            appconfig.Icon,
				Settings: map[string]string{
					"title": am.Annotations[constants.ApplicationTitleLabel],
				},
			},
			Status: v1alpha1.ApplicationStatus{
				State:      am.Status.State.String(),
				StatusTime: &now,
				UpdateTime: &now,
			},
		}
		appsMap[am.Name] = &app
	}

	allApps, err := h.appLister.List(labels.Everything())
	if err != nil {
		api.HandleError(resp, req, err)
		return
	}

	// filter by application's owner
	for _, a := range allApps {
		if len(isSysApp) > 0 && strconv.FormatBool(a.Spec.IsSysApp) != isSysApp {
			continue
		}
		appsEntranceMap[a.Name] = a

		// Sys apps come only from this source; take them verbatim.
		if a.Spec.IsSysApp {
			appsMap[a.Name] = a
			continue
		}
		// For apps already seen via a manager, prefer the live settings.
		if v, ok := appsMap[a.Name]; ok {
			v.Spec.Settings = a.Spec.Settings
		}
	}

	for _, app := range appsMap {
		// Resolve concrete entrance URLs for the response.
		entrances, err := app.GenEntranceURL(req.Request.Context())
		if err != nil {
			api.HandleError(resp, req, err)
			return
		}
		app.Spec.Entrances = entrances

		sharedEntrances, err := app.GenSharedEntranceURL(req.Request.Context())
		if err != nil {
			api.HandleError(resp, req, err)
			return
		}
		app.Spec.SharedEntrances = sharedEntrances

		// Copy live entrance statuses from the real Application object.
		if v, ok := appsEntranceMap[app.Name]; ok {
			app.Status.EntranceStatuses = v.Status.EntranceStatuses
		}
		filteredApps = append(filteredApps, *app)
	}

	// sort by create time desc
	sort.Slice(filteredApps, func(i, j int) bool {
		return filteredApps[j].CreationTimestamp.Before(&filteredApps[i].CreationTimestamp)
	})

	resp.WriteAsJson(filteredApps)
}
|
||||
|
||||
func (h *Handler) getAllUser() ([]string, error) {
|
||||
users := make([]string, 0)
|
||||
gvr := schema.GroupVersionResource{
|
||||
Group: "iam.kubesphere.io",
|
||||
Version: "v1alpha2",
|
||||
Resource: "users",
|
||||
}
|
||||
dClient, err := dynamic.NewForConfig(h.kubeConfig)
|
||||
if err != nil {
|
||||
return users, err
|
||||
}
|
||||
user, err := dClient.Resource(gvr).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return users, err
|
||||
}
|
||||
for _, u := range user.Items {
|
||||
if u.Object == nil {
|
||||
continue
|
||||
}
|
||||
users = append(users, u.GetName())
|
||||
}
|
||||
return users, nil
|
||||
}
|
||||
|
||||
func (h *Handler) renderManifest(req *restful.Request, resp *restful.Response) {
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
request := api.ManifestRenderRequest{}
|
||||
err := req.ReadEntity(&request)
|
||||
if err != nil {
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
admin, err := kubesphere.GetAdminUsername(req.Request.Context(), h.kubeConfig)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
isAdmin, err := kubesphere.IsAdmin(req.Request.Context(), h.kubeConfig, owner)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
renderedYAML, err := utils.RenderManifestFromContent([]byte(request.Content), owner, admin, isAdmin)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
resp.WriteEntity(api.ManifestRenderResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.ManifestRenderRespData{Content: renderedYAML},
|
||||
})
|
||||
}
|
||||
|
||||
func (h *Handler) adminUsername(req *restful.Request, resp *restful.Response) {
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
username, err := kubesphere.GetAdminUsername(req.Request.Context(), config)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
resp.WriteEntity(api.AdminUsernameResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.AdminUsernameRespData{Username: username},
|
||||
})
|
||||
}
|
||||
|
||||
func (h *Handler) adminUserList(req *restful.Request, resp *restful.Response) {
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
adminList, err := kubesphere.GetAdminUserList(req.Request.Context(), config)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
admins := make([]string, 0, len(adminList))
|
||||
for _, a := range adminList {
|
||||
admins = append(admins, a.Name)
|
||||
}
|
||||
resp.WriteEntity(api.AdminListResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: admins,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *Handler) oamValues(req *restful.Request, resp *restful.Response) {
|
||||
values := map[string]interface{}{
|
||||
"admin": "admin",
|
||||
"bfl": map[string]string{
|
||||
"username": "admin",
|
||||
},
|
||||
}
|
||||
|
||||
var nodes corev1.NodeList
|
||||
err := h.ctrlClient.List(req.Request.Context(), &nodes, &client.ListOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("list node failed %v", err)
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
gpuType, err := utils.FindGpuTypeFromNodes(&nodes)
|
||||
if err != nil {
|
||||
klog.Errorf("get gpu type failed %v", gpuType)
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
values["GPU"] = map[string]interface{}{
|
||||
"Type": gpuType,
|
||||
"Cuda": os.Getenv("OLARES_SYSTEM_CUDA_VERSION"),
|
||||
}
|
||||
values["user"] = map[string]interface{}{
|
||||
"zone": "user-zone",
|
||||
}
|
||||
values["schedule"] = map[string]interface{}{
|
||||
"nodeName": "node",
|
||||
}
|
||||
values["oidc"] = map[string]interface{}{
|
||||
"client": map[string]interface{}{},
|
||||
"issuer": "issuer",
|
||||
}
|
||||
values["userspace"] = map[string]interface{}{
|
||||
"appCache": "appcache",
|
||||
"userData": "userspace/Home",
|
||||
}
|
||||
values["os"] = map[string]interface{}{
|
||||
"appKey": "appKey",
|
||||
"appSecret": "appSecret",
|
||||
}
|
||||
|
||||
values["domain"] = map[string]string{}
|
||||
values["cluster"] = map[string]string{}
|
||||
values["dep"] = map[string]interface{}{}
|
||||
values["postgres"] = map[string]interface{}{
|
||||
"databases": map[string]interface{}{},
|
||||
}
|
||||
values["mariadb"] = map[string]interface{}{
|
||||
"databases": map[string]interface{}{},
|
||||
}
|
||||
values["mysql"] = map[string]interface{}{
|
||||
"databases": map[string]interface{}{},
|
||||
}
|
||||
values["minio"] = map[string]interface{}{
|
||||
"buckets": map[string]interface{}{},
|
||||
}
|
||||
values["rabbitmq"] = map[string]interface{}{
|
||||
"vhosts": map[string]interface{}{},
|
||||
}
|
||||
values["elasticsearch"] = map[string]interface{}{
|
||||
"indexes": map[string]interface{}{},
|
||||
}
|
||||
values["redis"] = map[string]interface{}{}
|
||||
values["mongodb"] = map[string]interface{}{
|
||||
"databases": map[string]interface{}{},
|
||||
}
|
||||
values["svcs"] = map[string]interface{}{}
|
||||
values["nats"] = map[string]interface{}{
|
||||
"subjects": map[string]interface{}{},
|
||||
"refs": map[string]interface{}{},
|
||||
}
|
||||
resp.WriteAsJson(values)
|
||||
}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/helm"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
)
|
||||
|
||||
func (h *Handler) releaseVersion(req *restful.Request, resp *restful.Response) {
|
||||
appName := req.PathParameter(ParamAppName)
|
||||
owner := req.Attribute(constants.UserContextAttribute)
|
||||
appNamespace, err := utils.AppNamespace(appName, owner.(string), "")
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
actionConfig, _, err := helm.InitConfig(h.kubeConfig, appNamespace)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
version, _, err := apputils.GetDeployedReleaseVersion(actionConfig, appName)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
resp.WriteAsJson(api.ReleaseVersionResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.ReleaseVersionData{Version: version},
|
||||
})
|
||||
}
|
||||
167
framework/app-service/pkg/apiserver/handler_appenv.go
Normal file
167
framework/app-service/pkg/apiserver/handler_appenv.go
Normal file
|
|
@ -0,0 +1,167 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// remoteOptionsProxyRequest is the request body for proxyRemoteOptions;
// Endpoint is the remote http(s) URL to fetch env-value options from.
type remoteOptionsProxyRequest struct {
	Endpoint string `json:"endpoint"`
}
|
||||
|
||||
func (h *Handler) getAppEnv(req *restful.Request, resp *restful.Response) {
|
||||
appName := req.PathParameter(ParamAppName)
|
||||
owner := getCurrentUser(req)
|
||||
|
||||
if appName == "" || owner == "" {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("app name and owner are required"))
|
||||
return
|
||||
}
|
||||
|
||||
appNamespace, err := utils.AppNamespace(appName, owner, "")
|
||||
if err != nil {
|
||||
api.HandleInternalError(resp, req, fmt.Errorf("failed to get app namespace: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
envs := make([]sysv1alpha1.AppEnvVar, 0)
|
||||
var appEnv sysv1alpha1.AppEnv
|
||||
if err := client.IgnoreNotFound(h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Namespace: appNamespace, Name: apputils.FormatAppEnvName(appName, owner)}, &appEnv)); err != nil {
|
||||
api.HandleInternalError(resp, req, err)
|
||||
return
|
||||
}
|
||||
if len(appEnv.Envs) > 0 {
|
||||
envs = appEnv.Envs
|
||||
}
|
||||
|
||||
resp.WriteAsJson(envs)
|
||||
}
|
||||
|
||||
// updateAppEnv validates and applies caller-supplied env-var changes to
// the app's AppEnv object and returns the resulting env list. Only
// editable vars may change; required vars without a default may not be
// blanked; values must pass ValidateValue. A change to a var marked
// ApplyOnChange flips NeedApply so the controller re-applies the app.
func (h *Handler) updateAppEnv(req *restful.Request, resp *restful.Response) {
	appName := req.PathParameter(ParamAppName)
	owner := getCurrentUser(req)

	if appName == "" || owner == "" {
		api.HandleBadRequest(resp, req, fmt.Errorf("app name and owner are required"))
		return
	}

	var updatedEnvs []sysv1alpha1.AppEnvVar
	if err := req.ReadEntity(&updatedEnvs); err != nil {
		api.HandleBadRequest(resp, req, fmt.Errorf("failed to parse request body: %v", err))
		return
	}

	appNamespace, err := utils.AppNamespace(appName, owner, "")
	if err != nil {
		api.HandleInternalError(resp, req, fmt.Errorf("failed to get app namespace: %v", err))
		return
	}

	var targetAppEnv sysv1alpha1.AppEnv
	if err := h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Namespace: appNamespace, Name: apputils.FormatAppEnvName(appName, owner)}, &targetAppEnv); err != nil {
		api.HandleInternalError(resp, req, err)
		return
	}

	updated := false
	// Keep a pristine copy so the final write can be a merge patch.
	original := targetAppEnv.DeepCopy()
	for i, existingEnv := range targetAppEnv.Envs {
		for _, env := range updatedEnvs {
			if existingEnv.EnvName == env.EnvName {
				if !existingEnv.Editable {
					api.HandleBadRequest(resp, req, fmt.Errorf("app env '%s' is not editable", env.EnvName))
					return
				}
				if existingEnv.Required && existingEnv.Default == "" && env.Value == "" {
					api.HandleBadRequest(resp, req, fmt.Errorf("app env '%s' is required", env.EnvName))
					return
				}
				// Only touch vars whose value actually changed.
				if existingEnv.Value != env.Value {
					if err := existingEnv.ValidateValue(env.Value); err != nil {
						api.HandleBadRequest(resp, req, fmt.Errorf("failed to update app env '%s': %v", env.EnvName, err))
						return
					}
					targetAppEnv.Envs[i].Value = env.Value
					updated = true
					if existingEnv.ApplyOnChange {
						targetAppEnv.NeedApply = true
					}
				}
				// Each existing env can match at most one update entry.
				break
			}
		}
	}

	// Skip the API round trip when nothing changed.
	if updated {
		if err := h.ctrlClient.Patch(req.Request.Context(), &targetAppEnv, client.MergeFrom(original)); err != nil {
			api.HandleInternalError(resp, req, err)
			return
		}
	}

	resp.WriteAsJson(targetAppEnv.Envs)
}
|
||||
|
||||
func (h *Handler) proxyRemoteOptions(req *restful.Request, resp *restful.Response) {
|
||||
var body remoteOptionsProxyRequest
|
||||
if err := req.ReadEntity(&body); err != nil {
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
if body.Endpoint == "" {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("endpoint is required"))
|
||||
return
|
||||
}
|
||||
u, err := url.Parse(body.Endpoint)
|
||||
if err != nil {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("invalid endpoint: %w", err))
|
||||
return
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("unsupported scheme: %s", u.Scheme))
|
||||
return
|
||||
}
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
httpReq, err := http.NewRequestWithContext(req.Request.Context(), http.MethodGet, body.Endpoint, nil)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
r, err := client.Do(httpReq)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
defer r.Body.Close()
|
||||
if r.StatusCode < 200 || r.StatusCode >= 300 {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("unexpected status code: %d", r.StatusCode))
|
||||
return
|
||||
}
|
||||
data, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
var items []sysv1alpha1.EnvValueOptionItem
|
||||
if err := json.Unmarshal(data, &items); err != nil {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("invalid RemoteOptions body: %w", err))
|
||||
return
|
||||
}
|
||||
resp.WriteAsJson(items)
|
||||
}
|
||||
89
framework/app-service/pkg/apiserver/handler_applyenv.go
Normal file
89
framework/app-service/pkg/apiserver/handler_applyenv.go
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
appv1alpha1 "bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func (h *Handler) appApplyEnv(req *restful.Request, resp *restful.Response) {
|
||||
app := req.PathParameter(ParamAppName)
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
var appMgr appv1alpha1.ApplicationManager
|
||||
appMgrName, err := apputils.FmtAppMgrName(app, owner, "")
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: appMgrName}, &appMgr)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
if !appstate.IsOperationAllowed(appMgr.Status.State, appv1alpha1.ApplyEnvOp) {
|
||||
err = fmt.Errorf("%s operation is not allowed for %s state", appv1alpha1.ApplyEnvOp, appMgr.Status.State)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
token, err := h.GetUserServiceAccountToken(req.Request.Context(), owner)
|
||||
if err != nil {
|
||||
klog.Error("Failed to get user service account token: ", err)
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
appCopy := appMgr.DeepCopy()
|
||||
appCopy.Spec.OpType = appv1alpha1.ApplyEnvOp
|
||||
appCopy.Annotations[api.AppTokenKey] = token
|
||||
|
||||
err = h.ctrlClient.Patch(req.Request.Context(), appCopy, client.MergeFrom(&appMgr))
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
now := metav1.Now()
|
||||
opID := strconv.FormatInt(time.Now().Unix(), 10)
|
||||
|
||||
status := appv1alpha1.ApplicationManagerStatus{
|
||||
OpType: appv1alpha1.ApplyEnvOp,
|
||||
OpID: opID,
|
||||
State: appv1alpha1.ApplyingEnv,
|
||||
Message: "waiting for applying env",
|
||||
StatusTime: &now,
|
||||
UpdateTime: &now,
|
||||
}
|
||||
|
||||
am, err := apputils.UpdateAppMgrStatus(appMgrName, status)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
utils.PublishAppEvent(utils.EventParams{
|
||||
Owner: am.Spec.AppOwner,
|
||||
Name: am.Spec.AppName,
|
||||
OpType: string(am.Status.OpType),
|
||||
OpID: opID,
|
||||
State: appv1alpha1.ApplyingEnv.String(),
|
||||
RawAppName: am.Spec.RawAppName,
|
||||
Type: "app",
|
||||
Title: apputils.AppTitle(am.Spec.Config),
|
||||
})
|
||||
|
||||
resp.WriteEntity(api.Response{Code: 200})
|
||||
}
|
||||
71
framework/app-service/pkg/apiserver/handler_callback.go
Normal file
71
framework/app-service/pkg/apiserver/handler_callback.go
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/appwatchers"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// singleTask ensures at most one SuspendTopApp task runs at a time; it
// is replaced with a fresh Once when the task finishes so a later
// high-load event can trigger again.
//
// NOTE(review): the reassignment inside the goroutine below races with
// concurrent reads of singleTask in highload — confirm whether this
// needs a mutex or an atomic flag.
var singleTask *sync.Once = &sync.Once{}

// highload is the callback invoked when system-wide resource usage is
// too high; it starts (at most one at a time) a background task that
// suspends the most resource-hungry application, then acks with code 0.
func (h *Handler) highload(req *restful.Request, resp *restful.Response) {
	var load struct {
		CPU    float64 `json:"cpu"`
		Memory float64 `json:"memory"`
	}

	err := req.ReadEntity(&load)
	if err != nil {
		klog.Errorf("Failed to read request err=%v", err)
		api.HandleBadRequest(resp, req, err)
		return
	}

	klog.Infof("System resources high load cpu=%v memory=%v", load.CPU, load.Memory)

	// start application suspending task
	singleTask.Do(func() {
		go func() {
			err := appwatchers.SuspendTopApp(h.serviceCtx, h.ctrlClient)
			if err != nil {
				klog.Errorf("Failed to suspend applications err=%v", err)
			}
			// Re-arm the Once so the next high-load event can fire again.
			singleTask = &sync.Once{}
		}()
	})

	resp.WriteAsJson(map[string]int{"code": 0})

}
|
||||
|
||||
// userSingleTask ensures at most one per-user SuspendUserTopApp task
// runs at a time; it is re-armed when the task finishes.
//
// NOTE(review): re-assigning userSingleTask from the goroutine races
// with concurrent reads in userHighLoad — same caveat as singleTask.
var userSingleTask = &sync.Once{}

// userHighLoad is the callback invoked when a single user's resource
// usage is too high; it starts (at most one at a time) a background
// task that suspends that user's most resource-hungry application.
func (h *Handler) userHighLoad(req *restful.Request, resp *restful.Response) {
	var load struct {
		CPU    float64 `json:"cpu"`
		Memory float64 `json:"memory"`
		User   string  `json:"user"`
	}

	err := req.ReadEntity(&load)
	if err != nil {
		klog.Errorf("Failed to read request err=%v", err)
		api.HandleBadRequest(resp, req, err)
		return
	}
	klog.Infof("User: %s resources high load, cpu %.2f, mem %.2f", load.User, load.CPU, load.Memory)

	userSingleTask.Do(func() {
		go func() {
			err := appwatchers.SuspendUserTopApp(h.serviceCtx, h.ctrlClient, load.User)
			if err != nil {
				klog.Errorf("Failed to suspend application user=%s err=%v", load.User, err)
			}
			// Re-arm the Once so a later event for any user can fire.
			userSingleTask = &sync.Once{}
		}()
	})
	resp.WriteAsJson(map[string]int{"code": 0})
}
|
||||
174
framework/app-service/pkg/apiserver/handler_gpu.go
Normal file
174
framework/app-service/pkg/apiserver/handler_gpu.go
Normal file
|
|
@ -0,0 +1,174 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/client/clientset"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// running is true while a managed-memory switch is settling (a fixed
// 30-second window after the update); guarded by switchLock.
var running bool = false

// switchLock serializes nvshareSwitch calls and protects running.
var switchLock sync.Mutex
|
||||
|
||||
func (h *Handler) disableGpuManagedMemory(req *restful.Request, resp *restful.Response) {
|
||||
if err := h.nvshareSwitch(req, false); err != nil {
|
||||
api.HandleError(resp, req, &errors.StatusError{
|
||||
ErrStatus: metav1.Status{Code: 400, Message: "operation failed, " + err.Error()},
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
resp.WriteAsJson(map[string]int{"code": 0})
|
||||
}
|
||||
|
||||
func (h *Handler) enableGpuManagedMemory(req *restful.Request, resp *restful.Response) {
|
||||
if err := h.nvshareSwitch(req, true); err != nil {
|
||||
api.HandleError(resp, req, &errors.StatusError{
|
||||
ErrStatus: metav1.Status{Code: 400, Message: "operation failed, " + err.Error()},
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
resp.WriteAsJson(map[string]int{"code": 0})
|
||||
}
|
||||
|
||||
func (h *Handler) nvshareSwitch(req *restful.Request, enable bool) error {
|
||||
client := req.Attribute(constants.KubeSphereClientAttribute).(*clientset.ClientSet)
|
||||
switchLock.Lock()
|
||||
defer switchLock.Unlock()
|
||||
|
||||
if running {
|
||||
return fmt.Errorf("last operation is still running")
|
||||
}
|
||||
|
||||
deployments, err := client.KubeClient.Kubernetes().AppsV1().Deployments("").List(req.Request.Context(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.Error("list deployment error, ", err)
|
||||
return err
|
||||
}
|
||||
|
||||
envValue := "0"
|
||||
if enable {
|
||||
envValue = "1"
|
||||
}
|
||||
|
||||
for _, d := range deployments.Items {
|
||||
shouldUpdate := false
|
||||
for i, c := range d.Spec.Template.Spec.Containers {
|
||||
found := false
|
||||
for k := range c.Resources.Limits {
|
||||
if k == constants.NvshareGPU {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
// a gpu request container
|
||||
addEnv := true
|
||||
for n, env := range d.Spec.Template.Spec.Containers[i].Env {
|
||||
if env.Name == constants.EnvNvshareManagedMemory {
|
||||
addEnv = false
|
||||
d.Spec.Template.Spec.Containers[i].Env[n].Value = envValue
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if addEnv {
|
||||
d.Spec.Template.Spec.Containers[i].Env =
|
||||
append(d.Spec.Template.Spec.Containers[i].Env,
|
||||
corev1.EnvVar{Name: constants.EnvNvshareManagedMemory, Value: envValue})
|
||||
}
|
||||
|
||||
shouldUpdate = true
|
||||
} // end found
|
||||
} // end of container loop
|
||||
|
||||
if shouldUpdate {
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
deployment, err := client.KubeClient.Kubernetes().AppsV1().Deployments(d.Namespace).
|
||||
Get(req.Request.Context(), d.Name, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
deployment.Spec.Template.Spec.Containers = d.Spec.Template.Spec.Containers
|
||||
|
||||
_, err = client.KubeClient.Kubernetes().AppsV1().Deployments(d.Namespace).
|
||||
Update(req.Request.Context(), deployment, metav1.UpdateOptions{})
|
||||
|
||||
return err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
klog.Error("update deployment error, ", err, ", ", d.Name, ", ", d.Namespace)
|
||||
return err
|
||||
}
|
||||
} // should update
|
||||
} // end of deployment loop
|
||||
|
||||
// update terminus
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
terminus, err := utils.GetTerminus(req.Request.Context(), h.ctrlClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
terminus.Spec.Settings[constants.EnvNvshareManagedMemory] = envValue
|
||||
|
||||
return h.ctrlClient.Update(req.Request.Context(), terminus)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
klog.Error("update terminus error, ", err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
running = true
|
||||
// delay 30s, assume the all pods will be reload in 30s.
|
||||
delay := time.NewTimer(30 * time.Second)
|
||||
go func() {
|
||||
<-delay.C
|
||||
switchLock.Lock()
|
||||
defer switchLock.Unlock()
|
||||
|
||||
running = false
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Handler) getManagedMemoryValue(req *restful.Request, resp *restful.Response) {
|
||||
terminus, err := utils.GetTerminus(req.Request.Context(), h.ctrlClient)
|
||||
if err != nil {
|
||||
klog.Error("get terminus value error, ", err)
|
||||
api.HandleError(resp, req, &errors.StatusError{
|
||||
ErrStatus: metav1.Status{Code: 400, Message: "get value error, " + err.Error()},
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
managed := true
|
||||
if v, ok := terminus.Spec.Settings[constants.EnvNvshareManagedMemory]; ok && v == "0" {
|
||||
managed = false
|
||||
}
|
||||
|
||||
resp.WriteAsJson(&map[string]interface{}{
|
||||
"managed_memory": managed,
|
||||
},
|
||||
)
|
||||
}
|
||||
105
framework/app-service/pkg/apiserver/handler_installer_cancel.go
Normal file
105
framework/app-service/pkg/apiserver/handler_installer_cancel.go
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func (h *Handler) cancel(req *restful.Request, resp *restful.Response) {
|
||||
app := req.PathParameter(ParamAppName)
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
// type = timeout | operate
|
||||
cancelType := req.QueryParameter("type")
|
||||
if cancelType == "" {
|
||||
cancelType = "operate"
|
||||
}
|
||||
|
||||
name, err := apputils.FmtAppMgrName(app, owner, "")
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
var am v1alpha1.ApplicationManager
|
||||
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
state := am.Status.State
|
||||
if !appstate.IsOperationAllowed(state, v1alpha1.CancelOp) {
|
||||
api.HandleBadRequest(resp, req, fmt.Errorf("%s operation is not allowed for %s state", v1alpha1.CancelOp, am.Status.State))
|
||||
|
||||
return
|
||||
}
|
||||
var cancelState v1alpha1.ApplicationManagerState
|
||||
switch state {
|
||||
case v1alpha1.Pending, v1alpha1.PendingCancelFailed:
|
||||
cancelState = v1alpha1.PendingCanceling
|
||||
case v1alpha1.Downloading, v1alpha1.DownloadingCancelFailed:
|
||||
cancelState = v1alpha1.DownloadingCanceling
|
||||
case v1alpha1.Installing, v1alpha1.InstallingCancelFailed:
|
||||
cancelState = v1alpha1.InstallingCanceling
|
||||
case v1alpha1.Initializing:
|
||||
cancelState = v1alpha1.InitializingCanceling
|
||||
case v1alpha1.Resuming:
|
||||
cancelState = v1alpha1.ResumingCanceling
|
||||
case v1alpha1.Upgrading:
|
||||
cancelState = v1alpha1.UpgradingCanceling
|
||||
case v1alpha1.ApplyingEnv:
|
||||
cancelState = v1alpha1.ApplyingEnvCanceling
|
||||
}
|
||||
opID := strconv.FormatInt(time.Now().Unix(), 10)
|
||||
am.Spec.OpType = v1alpha1.CancelOp
|
||||
err = h.ctrlClient.Update(req.Request.Context(), &am)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
now := metav1.Now()
|
||||
status := v1alpha1.ApplicationManagerStatus{
|
||||
OpType: v1alpha1.CancelOp,
|
||||
OpID: opID,
|
||||
LastState: am.Status.LastState,
|
||||
State: cancelState,
|
||||
Progress: "0.00",
|
||||
Message: cancelType,
|
||||
StatusTime: &now,
|
||||
UpdateTime: &now,
|
||||
}
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
a, err := apputils.UpdateAppMgrStatus(name, status)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
utils.PublishAppEvent(utils.EventParams{
|
||||
Owner: a.Spec.AppOwner,
|
||||
Name: a.Spec.AppName,
|
||||
OpType: string(a.Status.OpType),
|
||||
OpID: opID,
|
||||
State: cancelState.String(),
|
||||
RawAppName: a.Spec.RawAppName,
|
||||
Type: "app",
|
||||
Title: apputils.AppTitle(a.Spec.Config),
|
||||
})
|
||||
|
||||
resp.WriteAsJson(api.InstallationResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.InstallationResponseData{UID: app, OpID: opID},
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,129 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func (h *Handler) imageInfo(req *restful.Request, resp *restful.Response) {
|
||||
imageReq := &api.ImageInfoRequest{}
|
||||
err := req.ReadEntity(imageReq)
|
||||
if err != nil {
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
if imageReq.AppName == "" || len(imageReq.Images) == 0 {
|
||||
api.HandleBadRequest(resp, req, errors.New("empty name or images"))
|
||||
return
|
||||
}
|
||||
start := time.Now()
|
||||
klog.Infof("received app %s fetch image info request", imageReq.AppName)
|
||||
|
||||
err = createAppImage(req.Request.Context(), h.ctrlClient, imageReq)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
var am v1alpha1.AppImage
|
||||
err = wait.PollImmediate(time.Second, 2*time.Minute, func() (done bool, err error) {
|
||||
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: imageReq.AppName}, &am)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
if am.Status.State == "completed" {
|
||||
return true, nil
|
||||
}
|
||||
if am.Status.State == "failed" {
|
||||
return false, errors.New(am.Status.Message)
|
||||
}
|
||||
klog.Infof("poll app %s image info response", imageReq.AppName)
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("poll app %s image info failed %v", imageReq.AppName, err)
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
err = h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: imageReq.AppName}, &am)
|
||||
if err != nil {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
klog.Infof("finished app %s fetch image info request, time elapsed: %v", imageReq.AppName, time.Since(start))
|
||||
|
||||
resp.WriteAsJson(map[string]interface{}{
|
||||
"name": imageReq.AppName,
|
||||
"images": am.Status.Images,
|
||||
})
|
||||
}
|
||||
|
||||
func createAppImage(ctx context.Context, ctrlClient client.Client, request *api.ImageInfoRequest) error {
|
||||
var nodes corev1.NodeList
|
||||
err := ctrlClient.List(ctx, &nodes, &client.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nodeList := make([]string, 0)
|
||||
for _, node := range nodes.Items {
|
||||
if !utils.IsNodeReady(&node) || node.Spec.Unschedulable {
|
||||
continue
|
||||
}
|
||||
nodeList = append(nodeList, node.Name)
|
||||
}
|
||||
if len(nodeList) == 0 {
|
||||
return errors.New("cluster has no suitable node to schedule")
|
||||
}
|
||||
var am v1alpha1.AppImage
|
||||
err = ctrlClient.Get(ctx, types.NamespacedName{Name: request.AppName}, &am)
|
||||
if err == nil {
|
||||
if am.Status.State != "completed" && am.Status.State != "failed" {
|
||||
return nil
|
||||
}
|
||||
err = ctrlClient.Delete(ctx, &am)
|
||||
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
imageList := make([]string, 0, len(request.Images))
|
||||
for _, image := range request.Images {
|
||||
imageList = append(imageList, image.ImageName)
|
||||
}
|
||||
imagesString, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
klog.Errorf("marshal appimage request failed %v", err)
|
||||
return err
|
||||
}
|
||||
m := v1alpha1.AppImage{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: request.AppName,
|
||||
Annotations: map[string]string{
|
||||
api.AppImagesKey: string(imagesString),
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.ImageSpec{
|
||||
AppName: request.AppName,
|
||||
Refs: imageList,
|
||||
Nodes: nodeList,
|
||||
},
|
||||
}
|
||||
err = ctrlClient.Create(ctx, &m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
778
framework/app-service/pkg/apiserver/handler_installer_install.go
Normal file
778
framework/app-service/pkg/apiserver/handler_installer_install.go
Normal file
|
|
@ -0,0 +1,778 @@
|
|||
package apiserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strconv"
|
||||
|
||||
"bytetrade.io/web3os/app-service/pkg/users/userspace"
|
||||
|
||||
"bytetrade.io/web3os/app-service/api/app.bytetrade.io/v1alpha1"
|
||||
sysv1alpha1 "bytetrade.io/web3os/app-service/api/sys.bytetrade.io/v1alpha1"
|
||||
"bytetrade.io/web3os/app-service/pkg/apiserver/api"
|
||||
"bytetrade.io/web3os/app-service/pkg/appcfg"
|
||||
"bytetrade.io/web3os/app-service/pkg/appstate"
|
||||
"bytetrade.io/web3os/app-service/pkg/constants"
|
||||
"bytetrade.io/web3os/app-service/pkg/generated/clientset/versioned"
|
||||
"bytetrade.io/web3os/app-service/pkg/kubesphere"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils"
|
||||
apputils "bytetrade.io/web3os/app-service/pkg/utils/app"
|
||||
"bytetrade.io/web3os/app-service/pkg/utils/config"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"helm.sh/helm/v3/pkg/time"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// depRequest wraps the list of app dependencies submitted for checking.
type depRequest struct {
	// Data holds the dependencies declared by the app's config.
	Data []appcfg.Dependency `json:"data"`
}
|
||||
|
||||
// installHelperIntf abstracts the version-specific steps of handling an app
// install request so the V1 and V2 config formats share the same top-level
// flow in Handler.install. Implementations write their own HTTP error
// responses; on a non-nil error the caller only logs and returns.
type installHelperIntf interface {
	// getAdminUsers returns all admin user names and whether the requester is one of them.
	getAdminUsers() (admin []string, isAdmin bool, err error)
	// getInstalledApps reports whether this app is already installed and returns the instances.
	getInstalledApps() (installed bool, app []*v1alpha1.Application, err error)
	// getAppConfig resolves the app's configuration for this request and stores it on the helper.
	getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion string) (err error)
	// setAppConfig applies clone-specific overrides (title, entrances) to the resolved config.
	setAppConfig(req *api.InstallRequest, appName string)
	// validate runs all pre-install checks (dependencies, conflicts, resources, envs, ...).
	validate(bool, []*v1alpha1.Application) error
	// setAppEnv merges user-supplied env overrides into the declared app envs.
	setAppEnv(overrides []sysv1alpha1.AppEnvVar) error
	// applyAppEnv persists the resolved app envs to the cluster.
	applyAppEnv(ctx context.Context) error
	// applyApplicationManager creates/patches the ApplicationManager and returns the operation ID.
	applyApplicationManager(marketSource string) (opID string, err error)
}
|
||||
|
||||
// Compile-time checks that both helper implementations satisfy installHelperIntf.
var _ installHelperIntf = (*installHandlerHelper)(nil)
var _ installHelperIntf = (*installHandlerHelperV2)(nil)
|
||||
|
||||
// installHandlerHelper implements installHelperIntf for V1 app configs. It
// bundles the per-request state (HTTP objects, resolved names, clients) so
// the individual install steps can share it.
type installHandlerHelper struct {
	h          *Handler            // parent handler (kube config, controller client)
	req        *restful.Request    // incoming install request
	resp       *restful.Response   // response writer; helper methods write errors directly
	app        string              // app name taken from the URL path
	rawAppName string              // original app name when installing a clone
	owner      string              // requesting user
	token      string              // the user's service-account token
	insReq     *api.InstallRequest // parsed request body
	appConfig  *appcfg.ApplicationConfig // resolved app configuration (set by getAppConfig)
	client     *versioned.Clientset      // typed clientset for app.bytetrade.io resources
	// validateClusterScope is bound to the version-specific _validateClusterScope
	// so validate() dispatches to the right implementation (V1 or V2).
	validateClusterScope func(isAdmin bool, installedApps []*v1alpha1.Application) (err error)
}
|
||||
|
||||
// installHandlerHelperV2 handles V2 app configs. It reuses the V1 helper's
// steps via embedding and overrides only the parts that differ
// (setAppConfig, _validateClusterScope, getAppConfig).
type installHandlerHelperV2 struct {
	installHandlerHelper
}
|
||||
|
||||
// install handles an app install request. It resolves the app's config api
// version, picks the matching version-specific helper, and then runs the
// shared pipeline: admin lookup -> installed-app lookup -> config resolution
// -> env overrides -> validation -> env apply -> ApplicationManager apply.
// Helper methods write their own HTTP error responses, so error paths here
// only log and return without writing a second response.
func (h *Handler) install(req *restful.Request, resp *restful.Response) {
	app := req.PathParameter(ParamAppName)
	owner := req.Attribute(constants.UserContextAttribute).(string)
	var err error
	token, err := h.GetUserServiceAccountToken(req.Request.Context(), owner)
	if err != nil {
		klog.Error("Failed to get user service account token: ", err)
		api.HandleError(resp, req, err)
		return
	}

	marketSource := req.HeaderParameter(constants.MarketSource)
	klog.Infof("install: user: %v, source: %v", owner, marketSource)

	insReq := &api.InstallRequest{}
	err = req.ReadEntity(insReq)
	if err != nil {
		api.HandleBadRequest(resp, req, err)
		return
	}
	klog.Infof("insReq: %#v", insReq)
	// Only these three chart sources are accepted.
	if insReq.Source != api.Market && insReq.Source != api.Custom && insReq.Source != api.DevBox {
		api.HandleBadRequest(resp, req, fmt.Errorf("unsupported chart source: %s", insReq.Source))
		return
	}
	// A non-empty RawAppName marks this request as a clone of an existing app.
	rawAppName := app
	if insReq.RawAppName != "" {
		rawAppName = insReq.RawAppName
	}
	klog.Infof("rawAppName: %s", rawAppName)
	chartVersion := ""
	if insReq.RawAppName != "" {
		// Clones must install the same chart version as the original app.
		chartVersion, err = h.getOriginChartVersion(rawAppName, owner)
		if err != nil {
			api.HandleBadRequest(resp, req, err)
			return
		}
	}

	apiVersion, appCfg, err := apputils.GetApiVersionFromAppConfig(req.Request.Context(), &apputils.ConfigOptions{
		App:          app,
		RawAppName:   rawAppName,
		Owner:        owner,
		RepoURL:      insReq.RepoURL,
		MarketSource: marketSource,
		Version:      chartVersion,
	})
	klog.Infof("chartVersion: %s", chartVersion)
	if err != nil {
		klog.Errorf("Failed to get api version err=%v", err)
		api.HandleBadRequest(resp, req, err)
		return
	}
	// Cloning is rejected when the app forbids multiple installs, and multi-
	// install itself is rejected for V2 configs and cluster-scoped apps.
	// (&& binds tighter than ||, so the left arm is
	// "!AllowMultipleInstall && RawAppName != ''".)
	if !appCfg.AllowMultipleInstall && insReq.RawAppName != "" || (appCfg.AllowMultipleInstall && (apiVersion == appcfg.V2 || appCfg.AppScope.ClusterScoped)) {
		klog.Errorf("app %s can not be clone", app)
		api.HandleBadRequest(resp, req, fmt.Errorf("app %s can not be clone", app))
		return
	}

	client, err := utils.GetClient()
	if err != nil {
		klog.Errorf("Failed to get client err=%v", err)
		api.HandleError(resp, req, err)
		return
	}

	// Pick the helper matching the app config's api version. The local `h`
	// below intentionally shadows the Handler receiver after construction.
	var helper installHelperIntf
	switch apiVersion {
	case appcfg.V1:
		klog.Info("Using install handler helper for V1")
		h := &installHandlerHelper{
			h:          h,
			req:        req,
			resp:       resp,
			app:        app,
			rawAppName: rawAppName,
			owner:      owner,
			token:      token,
			insReq:     insReq,
			client:     client,
		}

		h.validateClusterScope = h._validateClusterScope

		helper = h
	case appcfg.V2:
		klog.Info("Using install handler helper for V2")
		h := &installHandlerHelperV2{
			installHandlerHelper: installHandlerHelper{
				h:          h,
				req:        req,
				resp:       resp,
				app:        app,
				rawAppName: rawAppName,
				owner:      owner,
				token:      token,
				insReq:     insReq,
				client:     client,
			},
		}

		h.validateClusterScope = h._validateClusterScope
		helper = h
	default:
		klog.Errorf("Unsupported app config api version: %s", apiVersion)
		api.HandleBadRequest(resp, req, fmt.Errorf("unsupported app config api version: %s", apiVersion))
		return
	}

	adminUsers, isAdmin, err := helper.getAdminUsers()
	if err != nil {
		klog.Errorf("Failed to get admin user err=%v", err)
		return
	}

	// V2: get current user role and check if the app is installed by admin
	appInstalled, installedApps, err := helper.getInstalledApps()
	if err != nil {
		klog.Errorf("Failed to get installed app err=%v", err)
		return
	}

	err = helper.getAppConfig(adminUsers, marketSource, isAdmin, appInstalled, installedApps, chartVersion)
	if err != nil {
		klog.Errorf("Failed to get app config err=%v", err)
		return
	}
	err = helper.setAppEnv(insReq.Envs)
	if err != nil {
		klog.Errorf("Failed to set app env err=%v", err)
		return
	}

	err = helper.validate(isAdmin, installedApps)
	if err != nil {
		klog.Errorf("Failed to validate app install request err=%v", err)
		return
	}
	// Clone installs carry override title/entrances to distinguish the copy.
	if insReq.RawAppName != "" && insReq.Title != "" {
		helper.setAppConfig(insReq, app)
	}

	err = helper.applyAppEnv(req.Request.Context())
	if err != nil {
		klog.Errorf("Failed to apply app env err=%v", err)
		return
	}

	// create ApplicationManager
	opID, err := helper.applyApplicationManager(marketSource)
	if err != nil {
		klog.Errorf("Failed to apply application manager err=%v", err)
		return
	}

	resp.WriteEntity(api.InstallationResponse{
		Response: api.Response{Code: 200},
		Data:     api.InstallationResponseData{UID: app, OpID: opID},
	})
}
|
||||
|
||||
func (h *Handler) getOriginChartVersion(rawAppName, owner string) (string, error) {
|
||||
var ams v1alpha1.ApplicationManagerList
|
||||
err := h.ctrlClient.List(context.TODO(), &ams)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, am := range ams.Items {
|
||||
if am.Spec.AppName == rawAppName && am.Spec.AppOwner == owner {
|
||||
return am.Annotations[api.AppVersionKey], nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("rawApp %s not found", rawAppName)
|
||||
}
|
||||
|
||||
func (h *installHandlerHelper) getAdminUsers() (admin []string, isAdmin bool, err error) {
|
||||
adminList, err := kubesphere.GetAdminUserList(h.req.Request.Context(), h.h.kubeConfig)
|
||||
if err != nil {
|
||||
api.HandleError(h.resp, h.req, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, user := range adminList {
|
||||
admin = append(admin, user.Name)
|
||||
if user.Name == h.owner {
|
||||
isAdmin = true
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// validate runs every pre-install check in order: entrance config, hardware,
// dependencies, conflicts, TailScale ACLs, cfg file version, namespace,
// admin/cluster-scope permissions, resource requirements, middleware, and
// app envs. Each failing check writes its own HTTP response and returns a
// non-nil error; the caller must not write another response.
func (h *installHandlerHelper) validate(isAdmin bool, installedApps []*v1alpha1.Application) (err error) {
	// NOTE(review): the error from this first CheckDependencies call is never
	// inspected — it is immediately overwritten below, and unSatisfiedDeps is
	// only used later to format the CheckDependencies2 failure message.
	// Presumably intentional (best-effort detail gathering) — confirm.
	unSatisfiedDeps, err := CheckDependencies(h.req.Request.Context(), h.appConfig.Dependencies, h.h.ctrlClient, h.owner, true)

	// responseBadRequest assigns the named return and writes the 400 response
	// in one step, so call sites can simply `return` afterwards.
	responseBadRequest := func(e error) {
		err = e
		api.HandleBadRequest(h.resp, h.req, err)
	}
	result, err := apputils.CheckCloneEntrances(h.h.ctrlClient, h.appConfig, h.insReq)
	if err != nil {
		api.HandleError(h.resp, h.req, err)
		return err
	}
	if result != nil {
		api.HandleFailedCheck(h.resp, api.CheckTypeAppEntrance, result, 104222)
		return fmt.Errorf("invalid entrance config, check result: %#v", result)
	}

	reasons, err := apputils.CheckHardwareRequirement(h.req.Request.Context(), h.appConfig)

	if err != nil {
		api.HandleError(h.resp, h.req, err)
		return
	}
	if len(reasons) > 0 {
		// Hardware mismatches are reported with per-node reasons in the body.
		err = h.resp.WriteHeaderAndEntity(http.StatusBadRequest, map[string]any{
			"code":   http.StatusBadRequest,
			"result": reasons,
		})
		if err != nil {
			klog.Infof("failed to write hardware reason: %v", err)
		}
		return errors.New("invalid spec.hardware config or no node satisfied hardware requirement")
	}

	err = apputils.CheckDependencies2(h.req.Request.Context(), h.h.ctrlClient, h.appConfig.Dependencies, h.owner, true)
	if err != nil {
		klog.Errorf("Failed to check dependencies err=%v", err)
		// The earlier unSatisfiedDeps list produces a friendlier message than
		// the raw CheckDependencies2 error.
		responseBadRequest(FormatDependencyError(unSatisfiedDeps))
		return
	}

	err = apputils.CheckConflicts(h.req.Request.Context(), h.appConfig.Conflicts, h.owner)
	if err != nil {
		klog.Errorf("Failed to check installed conflict app err=%v", err)
		api.HandleBadRequest(h.resp, h.req, err)
		return
	}

	err = apputils.CheckTailScaleACLs(h.appConfig.TailScale.ACLs)
	if err != nil {
		klog.Errorf("Failed to check TailScale ACLs err=%v", err)
		api.HandleBadRequest(h.resp, h.req, err)
		return
	}

	err = apputils.CheckCfgFileVersion(h.appConfig.CfgFileVersion, config.MinCfgFileVersion)
	if err != nil {
		responseBadRequest(err)
		return
	}

	err = apputils.CheckNamespace(h.appConfig.Namespace)
	if err != nil {
		responseBadRequest(err)
		return
	}

	// Permission checks: some apps are admin-only, and only admins may create
	// cluster-scoped apps.
	if !isAdmin && h.appConfig.OnlyAdmin {
		responseBadRequest(errors.New("only admin user can install this app"))
		return
	}

	if !isAdmin && h.appConfig.AppScope.ClusterScoped {
		responseBadRequest(errors.New("only admin user can create cluster level app"))
		return
	}

	// Dispatches to the version-specific implementation (V1 or V2) bound in
	// Handler.install.
	if err = h.validateClusterScope(isAdmin, installedApps); err != nil {
		klog.Errorf("Failed to validate cluster scope err=%v", err)
		api.HandleBadRequest(h.resp, h.req, err)
		return
	}

	//resourceType, err := CheckAppRequirement(h.h.kubeConfig, h.token, h.appConfig)
	resourceType, err := apputils.CheckAppRequirement(h.token, h.appConfig)
	if err != nil {
		klog.Errorf("Failed to check app requirement err=%v", err)
		h.resp.WriteHeaderAndEntity(http.StatusBadRequest, api.RequirementResp{
			Response: api.Response{Code: 400},
			Resource: resourceType,
			Message:  err.Error(),
		})
		return
	}

	resourceType, err = apputils.CheckUserResRequirement(h.req.Request.Context(), h.appConfig, h.owner)
	if err != nil {
		h.resp.WriteHeaderAndEntity(http.StatusBadRequest, api.RequirementResp{
			Response: api.Response{Code: 400},
			Resource: resourceType,
			Message:  err.Error(),
		})
		return
	}

	satisfied, err := apputils.CheckMiddlewareRequirement(h.req.Request.Context(), h.h.ctrlClient, h.appConfig.Middleware)
	if err != nil {
		api.HandleError(h.resp, h.req, err)
		return
	}
	if !satisfied {
		err = fmt.Errorf("middleware requirement can not be satisfied")
		h.resp.WriteHeaderAndEntity(http.StatusBadRequest, api.RequirementResp{
			Response: api.Response{Code: 400},
			Resource: "middleware",
			Message:  "middleware requirement can not be satisfied",
		})
		return
	}

	ret, err := apputils.CheckAppEnvs(h.req.Request.Context(), h.h.ctrlClient, h.appConfig.Envs, h.owner)
	if err != nil {
		klog.Errorf("Failed to check app environment config err=%v", err)
		api.HandleInternalError(h.resp, h.req, err)
		return
	}
	if ret != nil {
		api.HandleFailedCheck(h.resp, api.CheckTypeAppEnv, ret, http.StatusUnprocessableEntity)
		return fmt.Errorf("Invalid appenv config, check result: %#v", ret)
	}

	return
}
|
||||
|
||||
func (h *installHandlerHelper) _validateClusterScope(isAdmin bool, installedApp []*v1alpha1.Application) (err error) {
|
||||
for _, installedApp := range installedApp {
|
||||
if h.appConfig.AppScope.ClusterScoped && installedApp.IsClusterScoped() {
|
||||
return errors.New("only one cluster scoped app can install in on cluster")
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *installHandlerHelper) getInstalledApps() (installed bool, app []*v1alpha1.Application, err error) {
|
||||
var apps *v1alpha1.ApplicationList
|
||||
apps, err = h.client.AppV1alpha1().Applications().List(h.req.Request.Context(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list applications err=%v", err)
|
||||
api.HandleError(h.resp, h.req, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, a := range apps.Items {
|
||||
if a.Spec.Name == h.app {
|
||||
installed = true
|
||||
app = append(app, &a)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *installHandlerHelper) getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion string) (err error) {
|
||||
var (
|
||||
admin string
|
||||
installAsAdmin bool
|
||||
cluserAppInstalled bool
|
||||
installedCluserAppOwner string
|
||||
)
|
||||
|
||||
if appInstalled && len(installedApps) > 0 {
|
||||
for _, installedApp := range installedApps {
|
||||
klog.Infof("app: %s is already installed by %s", installedApp.Spec.Name, installedApp.Spec.Owner)
|
||||
// if the app is already installed, and the app's owner is admin,
|
||||
appOwner := installedApp.Spec.Owner
|
||||
if slices.Contains(adminUsers, appOwner) {
|
||||
// check the app is installed as cluster scope
|
||||
if installedApp.IsClusterScoped() {
|
||||
cluserAppInstalled = true
|
||||
installedCluserAppOwner = appOwner
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case cluserAppInstalled:
|
||||
admin = installedCluserAppOwner
|
||||
installAsAdmin = false
|
||||
case !isAdmin:
|
||||
if len(adminUsers) == 0 {
|
||||
klog.Errorf("No admin user found")
|
||||
api.HandleBadRequest(h.resp, h.req, fmt.Errorf("no admin user found"))
|
||||
return
|
||||
}
|
||||
admin = adminUsers[0]
|
||||
installAsAdmin = false
|
||||
default:
|
||||
admin = h.owner
|
||||
installAsAdmin = true
|
||||
}
|
||||
|
||||
appConfig, _, err := apputils.GetAppConfig(h.req.Request.Context(), &apputils.ConfigOptions{
|
||||
App: h.app,
|
||||
RawAppName: h.rawAppName,
|
||||
Owner: h.owner,
|
||||
RepoURL: h.insReq.RepoURL,
|
||||
Version: chartVersion,
|
||||
Admin: admin,
|
||||
IsAdmin: installAsAdmin,
|
||||
MarketSource: marketSource,
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get appconfig err=%v", err)
|
||||
api.HandleBadRequest(h.resp, h.req, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.appConfig = appConfig
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *installHandlerHelper) setAppConfig(req *api.InstallRequest, appName string) {
|
||||
h.appConfig.AppName = appName
|
||||
h.appConfig.RawAppName = appName
|
||||
if req.RawAppName != "" {
|
||||
h.appConfig.RawAppName = req.RawAppName
|
||||
}
|
||||
h.appConfig.Title = req.Title
|
||||
var appid string
|
||||
if userspace.IsSysApp(req.RawAppName) {
|
||||
appid = appName
|
||||
} else {
|
||||
appid = utils.Md5String(appName)[:8]
|
||||
}
|
||||
h.appConfig.AppID = appid
|
||||
|
||||
entranceMap := make(map[string]string)
|
||||
for _, e := range req.Entrances {
|
||||
entranceMap[e.Name] = e.Title
|
||||
}
|
||||
|
||||
for i, e := range h.appConfig.Entrances {
|
||||
h.appConfig.Entrances[i].Title = entranceMap[e.Name]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// applyApplicationManager creates the ApplicationManager for this install,
// or — when one already exists — patches its annotations and spec after
// verifying an install is allowed in its current state. It then moves the
// status to Pending, publishes the install event, and returns the operation
// ID. All error paths write their own HTTP response.
func (h *installHandlerHelper) applyApplicationManager(marketSource string) (opID string, err error) {
	// The full resolved app config travels with the manager as JSON.
	config, err := json.Marshal(h.appConfig)
	if err != nil {
		api.HandleError(h.resp, h.req, err)
		return
	}
	var a *v1alpha1.ApplicationManager
	name, _ := apputils.FmtAppMgrName(h.app, h.owner, h.appConfig.Namespace)
	images := make([]api.Image, 0)
	if len(h.insReq.Images) != 0 {
		images = h.insReq.Images
	}
	imagesStr, _ := json.Marshal(images)
	appMgr := &v1alpha1.ApplicationManager{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				api.AppTokenKey:                 h.token,
				api.AppRepoURLKey:               h.insReq.RepoURL,
				api.AppVersionKey:               h.appConfig.Version,
				api.AppMarketSourceKey:          marketSource,
				api.AppInstallSourceKey:         "app-service",
				constants.ApplicationTitleLabel: h.appConfig.Title,
				constants.ApplicationImageLabel: string(imagesStr),
			},
		},
		Spec: v1alpha1.ApplicationManagerSpec{
			AppName:      h.app,
			RawAppName:   h.rawAppName,
			AppNamespace: h.appConfig.Namespace,
			AppOwner:     h.owner,
			Config:       string(config),
			Source:       h.insReq.Source.String(),
			Type:         v1alpha1.Type(h.appConfig.Type),
			OpType:       v1alpha1.InstallOp,
		},
	}
	// Create when absent; otherwise merge-patch the existing manager.
	a, err = h.client.AppV1alpha1().ApplicationManagers().Get(h.req.Request.Context(), name, metav1.GetOptions{})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			api.HandleError(h.resp, h.req, err)
			return
		}
		_, err = h.client.AppV1alpha1().ApplicationManagers().Create(h.req.Request.Context(), appMgr, metav1.CreateOptions{})
		if err != nil {
			api.HandleError(h.resp, h.req, err)
			return
		}
	} else {
		// A reinstall is only valid from states that allow InstallOp.
		if !appstate.IsOperationAllowed(a.Status.State, v1alpha1.InstallOp) {
			err = fmt.Errorf("%s operation is not allowed for %s state", v1alpha1.InstallOp, a.Status.State)
			api.HandleBadRequest(h.resp, h.req, err)
			return
		}
		// update Spec.Config
		patchData := map[string]interface{}{
			"metadata": map[string]interface{}{
				"annotations": map[string]interface{}{
					api.AppTokenKey:                 h.token,
					api.AppRepoURLKey:               h.insReq.RepoURL,
					api.AppVersionKey:               h.appConfig.Version,
					api.AppMarketSourceKey:          marketSource,
					api.AppInstallSourceKey:         "app-service",
					constants.ApplicationTitleLabel: h.appConfig.Title,
				},
			},
			"spec": map[string]interface{}{
				"opType":     v1alpha1.InstallOp,
				"config":     string(config),
				"source":     h.insReq.Source.String(),
				"rawAppName": h.rawAppName,
			},
		}
		var patchByte []byte
		patchByte, err = json.Marshal(patchData)
		if err != nil {
			api.HandleError(h.resp, h.req, err)
			return
		}
		_, err = h.client.AppV1alpha1().ApplicationManagers().Patch(h.req.Request.Context(), a.Name, types.MergePatchType, patchByte, metav1.PatchOptions{})
		if err != nil {
			api.HandleError(h.resp, h.req, err)
			return
		}

	}

	// The operation ID is the install's start time in unix seconds.
	opID = strconv.FormatInt(time.Now().Unix(), 10)

	now := metav1.Now()
	status := v1alpha1.ApplicationManagerStatus{
		OpType:     v1alpha1.InstallOp,
		State:      v1alpha1.Pending,
		OpID:       opID,
		Message:    "waiting for install",
		Progress:   "0.00",
		StatusTime: &now,
		UpdateTime: &now,
		OpTime:     &now,
	}
	a, err = apputils.UpdateAppMgrStatus(name, status)

	if err != nil {
		api.HandleError(h.resp, h.req, err)
		return
	}

	utils.PublishAppEvent(utils.EventParams{
		Owner:      a.Spec.AppOwner,
		Name:       a.Spec.AppName,
		OpType:     string(a.Status.OpType),
		OpID:       opID,
		State:      v1alpha1.Pending.String(),
		RawAppName: a.Spec.RawAppName,
		Type:       "app",
		Title:      apputils.AppTitle(a.Spec.Config),
	})
	return
}
|
||||
|
||||
func (h *installHandlerHelper) setAppEnv(overrides []sysv1alpha1.AppEnvVar) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
api.HandleBadRequest(h.resp, h.req, err)
|
||||
}
|
||||
}()
|
||||
if len(overrides) == 0 {
|
||||
return nil
|
||||
}
|
||||
if h.appConfig == nil {
|
||||
return fmt.Errorf("refuse to set app env on nil appconfig")
|
||||
}
|
||||
if len(h.appConfig.Envs) == 0 {
|
||||
return fmt.Errorf("refuse to set app env on app: %s with no declared envs", h.appConfig.AppName)
|
||||
}
|
||||
for _, override := range overrides {
|
||||
var found bool
|
||||
for i := range h.appConfig.Envs {
|
||||
if h.appConfig.Envs[i].EnvName == override.EnvName {
|
||||
found = true
|
||||
h.appConfig.Envs[i].Value = override.Value
|
||||
if override.ValueFrom != nil {
|
||||
h.appConfig.Envs[i].ValueFrom = override.ValueFrom
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("app env '%s' not found in app config", override.EnvName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *installHandlerHelper) applyAppEnv(ctx context.Context) (err error) {
|
||||
_, err = apputils.ApplyAppEnv(ctx, h.h.ctrlClient, h.appConfig)
|
||||
if err != nil {
|
||||
api.HandleError(h.resp, h.req, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (h *installHandlerHelperV2) setAppConfig(req *api.InstallRequest, appName string) {
|
||||
return
|
||||
}
|
||||
|
||||
func (h *installHandlerHelperV2) _validateClusterScope(isAdmin bool, installedApps []*v1alpha1.Application) (err error) {
|
||||
klog.Info("validate cluster scope for install handler v2")
|
||||
|
||||
// check if subcharts has a client chart
|
||||
for _, subChart := range h.appConfig.SubCharts {
|
||||
if !subChart.Shared {
|
||||
if subChart.Name != h.app {
|
||||
err := fmt.Errorf("non-shared subchart must has the same name with the app, subchart name is %s but the main app is %s", subChart.Name, h.app)
|
||||
klog.Error(err)
|
||||
api.HandleBadRequest(h.resp, h.req, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// in V2, we do not check cluster scope here, the cluster scope app
|
||||
// will be checked if the cluster part is installed by another user in the installing phase
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *installHandlerHelperV2) getAppConfig(adminUsers []string, marketSource string, isAdmin, appInstalled bool, installedApps []*v1alpha1.Application, chartVersion string) (err error) {
|
||||
klog.Info("get app config for install handler v2")
|
||||
|
||||
var (
|
||||
admin string
|
||||
)
|
||||
|
||||
if isAdmin {
|
||||
admin = h.owner
|
||||
} else {
|
||||
if len(adminUsers) == 0 {
|
||||
klog.Errorf("No admin user found")
|
||||
api.HandleBadRequest(h.resp, h.req, fmt.Errorf("no admin user found"))
|
||||
return
|
||||
}
|
||||
admin = adminUsers[0]
|
||||
}
|
||||
|
||||
appConfig, _, err := apputils.GetAppConfig(h.req.Request.Context(), &apputils.ConfigOptions{
|
||||
App: h.app,
|
||||
RawAppName: h.rawAppName,
|
||||
Owner: h.owner,
|
||||
RepoURL: h.insReq.RepoURL,
|
||||
Version: chartVersion,
|
||||
Token: h.token,
|
||||
Admin: admin,
|
||||
MarketSource: marketSource,
|
||||
IsAdmin: isAdmin,
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to get appconfig err=%v", err)
|
||||
api.HandleBadRequest(h.resp, h.req, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.appConfig = appConfig
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *Handler) isDeployAllowed(req *restful.Request, resp *restful.Response) {
|
||||
app := req.PathParameter(ParamAppName)
|
||||
owner := req.Attribute(constants.UserContextAttribute).(string)
|
||||
|
||||
name := fmt.Sprintf("%s-%s-%s", app, owner, app)
|
||||
var am v1alpha1.ApplicationManager
|
||||
err := h.ctrlClient.Get(req.Request.Context(), types.NamespacedName{Name: name}, &am)
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
api.HandleError(resp, req, err)
|
||||
return
|
||||
}
|
||||
resp.WriteEntity(api.CanDeployResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.CanDeployResponseData{
|
||||
CanOp: true,
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
if am.Status.State == v1alpha1.Uninstalled {
|
||||
resp.WriteEntity(api.CanDeployResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.CanDeployResponseData{
|
||||
CanOp: true,
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
canOp := false
|
||||
if appstate.IsOperationAllowed(am.Status.State, v1alpha1.UninstallOp) {
|
||||
canOp = true
|
||||
}
|
||||
resp.WriteEntity(api.CanDeployResponse{
|
||||
Response: api.Response{Code: 200},
|
||||
Data: api.CanDeployResponseData{
|
||||
CanOp: canOp,
|
||||
},
|
||||
})
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue