I have created a NodeJS script for deploying review apps to Kubernetes for my GitLab repository. To do this, I’m using the Kubernetes NodeJS client.
For completeness sake, I have included truncated definitions of the Kubernetes resources.
const k8s = require('@kubernetes/client-node');
const logger = require('../logger');
const {
CI_COMMIT_REF_NAME,
CI_ENVIRONMENT_SLUG,
CI_ENVIRONMENT_URL,
CI_REGISTRY_IMAGE,
KUBE_NAMESPACE,
} = process.env;
const { hostname } = new URL(CI_ENVIRONMENT_URL);
const mysqlDeployment = {
apiVersion: 'apps/v1',
kind: 'Deployment',
metadata: {
name: `${CI_ENVIRONMENT_SLUG}-mysql`,
labels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'mysql',
},
},
spec: {
replicas: 1,
selector: {
matchLabels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'mysql',
},
},
template: {
metadata: {
labels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'mysql',
},
},
spec: {
containers: [
{
image: 'mysql:8',
name: 'mysql',
},
],
ports: { containerPort: 3306 },
},
},
},
};
const mysqlService = {
apiVersion: 'v1',
kind: 'Service',
metadata: {
name: `${CI_ENVIRONMENT_SLUG}-mysql`,
labels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'mysql',
},
},
spec: {
ports: [{ port: 3306 }],
selector: {
app: CI_ENVIRONMENT_SLUG,
tier: 'mysql',
},
clusterIP: 'None',
},
};
const appDeployment = {
apiVersion: 'apps/v1',
kind: 'Deployment',
metadata: {
name: `${CI_ENVIRONMENT_SLUG}-frontend`,
labels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'frontend',
},
},
spec: {
replicas: 1,
selector: {
matchLabels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'frontend',
},
},
template: {
metadata: {
labels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'frontend',
},
},
spec: {
containers: [
{
image: `${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_NAME}`,
imagePullPolicy: 'Always',
name: 'app',
ports: [{ containerPort: 9999 }],
},
],
imagePullSecrets: [{ name: 'registry.gitlab.com' }],
},
},
},
};
const appService = {
apiVersion: 'v1',
kind: 'Service',
metadata: {
name: `${CI_ENVIRONMENT_SLUG}-frontend`,
labels: {
app: CI_ENVIRONMENT_SLUG,
tier: 'frontend',
},
},
spec: {
ports: [{ port: 9999 }],
selector: {
app: CI_ENVIRONMENT_SLUG,
tier: 'frontend',
},
clusterIP: 'None',
},
};
const ingress = {
apiVersion: 'extensions/v1beta1',
kind: 'Ingress',
metadata: {
name: `${CI_ENVIRONMENT_SLUG}-ingress`,
labels: {
app: CI_ENVIRONMENT_SLUG,
},
annotations: {
'certmanager.k8s.io/cluster-issuer': 'letsencrypt-prod',
'kubernetes.io/ingress.class': 'nginx',
'nginx.ingress.kubernetes.io/proxy-body-size': '50m',
},
},
spec: {
tls: [
{
hosts: [hostname],
secretName: `${CI_ENVIRONMENT_SLUG}-prod`,
},
],
rules: [
{
host: hostname,
http: {
paths: [
{
path: '/',
backend: {
serviceName: `${CI_ENVIRONMENT_SLUG}-frontend`,
servicePort: 9999,
},
},
],
},
},
],
},
};
I use the following functions to deploy these resources to Kubernetes.
async function noConflict(resource, create, replace) {
const { kind } = resource;
const { name } = resource.metadata;
try {
logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
await create(KUBE_NAMESPACE, resource);
logger.info(`Created ${kind.toLowerCase()}: ${name}`);
} catch (err) {
if (err.response.statusCode !== 409) {
throw err;
}
logger.warn(`${kind} ${name} already exists… Replacing instead.`);
await replace(name, KUBE_NAMESPACE, resource);
logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
}
}
async function deploy() {
const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const apps = kc.makeApiClient(k8s.Apps_v1Api);
const beta = kc.makeApiClient(k8s.Extensions_v1beta1Api);
const core = kc.makeApiClient(k8s.Core_v1Api);
await noConflict(
mysqlDeployment,
apps.createNamespacedDeployment.bind(apps),
apps.replaceNamespacedDeployment.bind(apps),
);
await noConflict(
mysqlService,
core.createNamespacedService.bind(core),
core.replaceNamespacedService.bind(core),
);
await noConflict(
appDeployment,
apps.createNamespacedDeployment.bind(apps),
apps.replaceNamespacedDeployment.bind(apps),
);
await noConflict(
appService,
core.createNamespacedService.bind(core),
core.replaceNamespacedService.bind(core),
);
await noConflict(
ingress,
beta.createNamespacedIngress.bind(beta),
beta.replaceNamespacedIngress.bind(beta),
);
}
The initial deployment goes fine, but the replacement of the mysql service fails with the following HTTP request body.
{ kind: 'Status',
apiVersion: 'v1',
metadata: {},
status: 'Failure',
message:
'Service "review-fix-kubern-8a4yh2-mysql" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update',
reason: 'Invalid',
details:
{ name: 'review-fix-kubern-8a4yh2-mysql',
kind: 'Service',
causes: [Array] },
code: 422 } }
I have tried modifying noConflict
to get the current version, and use the active versionResource
to replace resources.
async function noConflict(resource, create, get, replace) {
const { kind, metadata } = resource;
const { name } = resource.metadata;
try {
logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
await create(KUBE_NAMESPACE, resource);
logger.info(`Created ${kind.toLowerCase()}: ${name}`);
} catch (err) {
if (err.response.statusCode !== 409) {
throw err;
}
logger.warn(`${kind} ${name} already exists… Replacing instead.`);
const {
body: {
metadata: { resourceVersion },
},
} = await get(name, KUBE_NAMESPACE);
const body = {
...resource,
metadata: {
...metadata,
resourceVersion,
},
};
logger.warn(`${kind} ${name} already exists… Replacing instead.`);
await replace(name, KUBE_NAMESPACE, body);
logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
}
}
However, this gives me another error.
{ kind: 'Status',
apiVersion: 'v1',
metadata: {},
status: 'Failure',
message:
'Service "review-prevent-ku-md2ghh-frontend" is invalid: spec.clusterIP: Invalid value: "": field is immutable',
reason: 'Invalid',
details:
{ name: 'review-prevent-ku-md2ghh-frontend',
kind: 'Service',
causes: [Array] },
code: 422 } }
What should I do to replace the running resources?
Whether or not the the database stays up, is a minor detail.
Update
To address the comment by LouisBaumann:
I have changed by code to the following, where read
is the respective read call for each resource.
async function noConflict(resource, create, read, replace) {
const { kind } = resource;
const { name } = resource.metadata;
try {
logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
await create(KUBE_NAMESPACE, resource);
logger.info(`Created ${kind.toLowerCase()}: ${name}`);
} catch (err) {
if (err.response.statusCode !== 409) {
throw err;
}
logger.warn(`${kind} ${name} already exists… Replacing instead.`);
const { body: existing } = await read(name, KUBE_NAMESPACE);
await replace(name, KUBE_NAMESPACE, merge(existing, resource));
logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
}
}
The above doesn’t crash, but it doesn’t update the review environment either.
Update
To address the answer by Crou:
I have updated the replace calls with patch calls. So the noConflict
function becomes:
async function noConflict(resource, create, patch) {
const { kind } = resource;
const { name } = resource.metadata;
try {
logger.info(`Creating ${kind.toLowerCase()}: ${name}`);
await create(KUBE_NAMESPACE, resource);
logger.info(`Created ${kind.toLowerCase()}: ${name}`);
} catch (err) {
if (err.response.statusCode !== 409) {
throw err;
}
logger.warn(`${kind} ${name} already exists… Patching instead.`);
await patch(name, KUBE_NAMESPACE, resource);
logger.info(`Replaced ${kind.toLowerCase()}: ${name}`);
}
}
I also changed the noConflict
calls to pass the patch versions instead of the replace functions.
await noConflict(
mysqlDeployment,
apps.createNamespacedDeployment.bind(apps),
apps.patchNamespacedDeployment.bind(apps),
);
// etc
This resulted in the following error:
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "415: Unsupported Media Type",
"reason": "UnsupportedMediaType",
"details": {},
"code": 415
}
From what I understand you are using replace
incorrectly.
Replace a resource by filename or stdin.
JSON and YAML formats are accepted. If replacing an existing resource, the complete resource spec must be provided. This can be obtained by
$ kubectl get TYPE NAME -o yaml
If you do replace without getting the yaml
from Kubernetes, you are missing resourceVersion
. So this is why you get the error:
Service "review-fix-kubern-8a4yh2-mysql" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update
You should use patch
or apply
if you are replacing just parts of the Deployment
.