Commit 5217a351 authored by Jack Greenfield

Merge pull request #28 from jackgr/porcelain-client

Porcelain client
parents da42f86b 2999bb9b
SUBDIRS := expandybird/. resourcifier/. manager/.
TARGETS := all build test push container clean
SUBDIRS_TARGETS := \
$(foreach t,$(TARGETS),$(addsuffix $t,$(SUBDIRS)))
GO_DEPS := github.com/kubernetes/deployment-manager/util/... github.com/kubernetes/deployment-manager/version/... github.com/kubernetes/deployment-manager/expandybird/... github.com/kubernetes/deployment-manager/resourcifier/... github.com/kubernetes/deployment-manager/manager/... github.com/kubernetes/deployment-manager/dm/...
.PHONY : all build test clean $(TARGETS) $(SUBDIRS_TARGETS) .project .docker
build:
	go get -v $(GO_DEPS)
	go install -v $(GO_DEPS)

all: build

clean:
...

test: build
	-go test -v $(GO_DEPS)
push: container
container: .project .docker
# Deployment Manager
[![Go Report Card](http://goreportcard.com/badge/kubernetes/deployment-manager)](http://goreportcard.com/report/kubernetes/deployment-manager)
Deployment Manager (DM) provides parameterized templates for Kubernetes clusters.
You can use it to deploy ready-to-use types, such as:
* [Replicated Service](types/replicatedservice/v1)
* [Redis](types/redis/v1)
As you can see, types live in ordinary Github repositories. This repository is a
DM type registry.
You can also use DM to deploy simple templates that use existing types, such as:
* [Guestbook](examples/guestbook/guestbook.yaml)
* [Deployment Manager](examples/bootstrap/bootstrap.yaml)
As you can see, a template is just a `YAML` file that supplies parameters to
instantiate types. (Yes, you're reading that second example correctly. It uses DM
to deploy itself. See [examples/bootstrap/README.md](examples/bootstrap/README.md)
for more information.)
DM runs server side, on your Kubernetes cluster, so it can tell you what types
you've instantiated in the cluster, and even what resources comprise a given instance.
So, you can ask questions like:
* Show me all the Redis slaves running in this cluster.
* Show me all the resources used by Redis.
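The `dm` client surfaces these queries on the command line. For example, the commands below (described later in this README and in the Guestbook example) list the deployed types and the deployed instances of a given type:

```
dm deployed-types
dm deployed-instances Service
```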
Because DM stores its state in the cluster, not on your workstation, you can ask
those questions from any client at any time.
Please hang out with us in
[the Slack chat room](https://kubernetes.slack.com/messages/sig-configuration/)
and/or
[the Google Group](https://groups.google.com/forum/#!forum/kubernetes-sig-config)
for the Kubernetes configuration SIG. Your feedback and contributions are welcome.
## Installing Deployment Manager
Follow these 3 steps to install DM:
1. Make sure your Kubernetes cluster is up and running, and that you can run
`kubectl` commands against it.
1. Clone this repository into the src folder of your GOPATH, if you haven't already.
1. Use `kubectl` to install DM into your cluster:
```
kubectl create -f install.yaml
```
That's it. You can now use `kubectl` to see DM running in your cluster:
```
kubectl get pod,rc,service
```
If you see expandybird-service, manager-service, resourcifier-service, and
expandybird-rc, manager-rc and resourcifier-rc with pods that are READY, then DM
is up and running!
The easiest way to interact with Deployment Manager is through `kubectl` proxy:
```
kubectl proxy --port=8001 &
```
This command starts a proxy that lets you interact with the Kubernetes api
server through port 8001 on localhost. `dm` uses
`http://localhost:8001/api/v1/proxy/namespaces/default/services/manager-service:manager`
as the default service address for DM.
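If you expose the manager service at a different address, you can override the default with the `--service` flag. For example, the bootstrap example points `dm` at an instance running on localhost:

```
dm --service=http://localhost:8080 deploy examples/bootstrap/bootstrap.yaml
```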
## Using Deployment Manager
You can use `dm` to deploy a type from the command line. This command deploys a
redis cluster with two workers from the type definition in this repository:
```
dm deploy redis/v1
```
When you deploy a type, you can optionally supply values for input parameters,
like this:
```
dm --properties workers=3 deploy redis/v1
```
When you deploy a type, `dm` generates a template from the type and input
parameters, and then deploys it.
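For example, the command above generates and deploys a template roughly equivalent to the following sketch (the exact resource name and type reference are produced by DM, and the type reference may be a full URL into the registry):

```
resources:
- name: redis
  type: redis/v1
  properties:
    workers: 3
```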
You can also deploy an existing template, or read one from `stdin`. This command
deploys the canonical Guestbook example from the examples directory:
```
dm deploy examples/guestbook/guestbook.yaml
```
You can now use `kubectl` to see Guestbook running:
```
kubectl get service
```
Look for frontend-service. If your cluster supports external load balancing, it
will have an external IP assigned to it, and you can navigate to it in your browser
to see the guestbook in action.
For more information about this example, see [examples/guestbook/README.md](examples/guestbook/README.md).
## Additional commands
The command line tool makes it easy to configure a cluster from a set of predefined
types.
## Uninstalling Deployment Manager
You can uninstall Deployment Manager using the same configuration file:
```
kubectl delete -f install.yaml
```
## Creating a type registry
All you need to create a type registry is a Github repository with a top level
file named `registry.yaml`, and a top level folder named `types` that contains
type definitions.
A type definition is just a folder that contains one or more versions, like `/v1`,
`/v2`, etc.
A version is just a folder that contains a type definition. As you can see from the
examples above, a type definition is just a Python or [Jinja](http://jinja.pocoo.org/)
file plus an optional schema.
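For example, this repository lays out the types used above like this:

```
registry.yaml
types/
  redis/
    v1/
      redis.jinja
      redis.jinja.schema
  replicatedservice/
    v1/
      replicatedservice.py
      replicatedservice.py.schema
```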
## Building the container images
This project runs Deployment Manager on Kubernetes as three replicated services.
By default, install.yaml uses prebuilt images stored in Google Container Registry
to install them. However, you can build your own container images and push them
to your own project in the Google Container Registry:
1. Set the environment variable PROJECT to the name of a project known to gcloud.
1. Run the following command:
```
make push
```
The project is still under active development, so you might run into issues. If
you do, please don't be shy about letting us know, or better yet, contributing a
fix or feature. We use the same [development process](CONTRIBUTING.md) as the main
Kubernetes repository.
## Relationship to Google Cloud Platform
DM uses the same concepts and languages as
[Google Cloud Deployment Manager](https://cloud.google.com/deployment-manager/overview),
but creates resources in Kubernetes clusters, not in Google Cloud Platform projects.
# Makefile for the Docker image gcr.io/$(PROJECT)/expandybird
# MAINTAINER: Jack Greenfield <jackgr@google.com>
# If you update this image please check the tag value before pushing.
.PHONY : all build test push container clean
test: client
	client --action=expand test/guestbook.yaml test/replicatedservice.py test/redis.jinja > /dev/null

client:
	go get -v ./...
	go install -v ./...
######################################################################
# Copyright 2015 The Kubernetes Authors All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
imports:
- path: replicatedservice.py
resources:
- name: expandybird
  type: replicatedservice.py
  properties:
    service_port: 8081
    target_port: 8080
    container_port: 8080
    external_service: false
    replicas: 2
    image: gcr.io/PROJECT/expandybird:latest
    labels:
      app: dm
- name: resourcifier
  type: replicatedservice.py
  properties:
    service_port: 8082
    target_port: 8080
    container_port: 8080
    external_service: false
    replicas: 2
    image: gcr.io/PROJECT/resourcifier:latest
    labels:
      app: dm
- name: manager
  type: replicatedservice.py
  properties:
    service_port: 8080
    target_port: 8080
    container_port: 8080
    external_service: true
    replicas: 1
    image: gcr.io/PROJECT/manager:latest
    labels:
      app: dm
{% set REDIS_PORT = 6379 %}
{% set WORKERS = properties['workers'] or 2 %}
resources:
- name: redis-master
  type: replicatedservice.py
  properties:
    # This has to be overwritten since service names are hard coded in the code
    service_name: redis-master
    service_port: {{ REDIS_PORT }}
    target_port: {{ REDIS_PORT }}
    container_port: {{ REDIS_PORT }}
    replicas: 1
    container_name: master
    image: redis
- name: redis-slave
  type: replicatedservice.py
  properties:
    # This has to be overwritten since service names are hard coded in the code
    service_name: redis-slave
    service_port: {{ REDIS_PORT }}
    container_port: {{ REDIS_PORT }}
    replicas: {{ WORKERS }}
    container_name: worker
    image: kubernetes/redis-slave:v2
    # An example of how to specify env variables.
    env:
    - name: GET_HOSTS_FROM
      value: env
    - name: REDIS_MASTER_SERVICE_HOST
      value: redis-master
######################################################################
# Copyright 2015 The Kubernetes Authors All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""Defines a ReplicatedService type by creating both a Service and an RC.
This module creates a typical abstraction for running a service in a
Kubernetes cluster, namely a replication controller and a service packaged
together into a single unit.
"""
import yaml
SERVICE_TYPE_COLLECTION = 'Service'
RC_TYPE_COLLECTION = 'ReplicationController'
def GenerateConfig(context):
  """Generates a Replication Controller and a matching Service.

  Args:
    context: Template context, which can contain the following properties:
      container_name - Name to use for container. If omitted, name is used.
      namespace - Namespace to create the resources in. If omitted,
        'default' is used.
      service_name - Name to use for service. If omitted, name-service is used.
      protocol - Protocol to use for the service
      service_port - Port to use for the service
      target_port - Target port for the service
      container_port - Container port to use
      replicas - Number of replicas to create in RC
      image - Docker image to use for replicas. Required.
      labels - labels to apply.
      env - Environmental variables to apply (list of maps). Format should be:
        [{'name': ENV_VAR_NAME, 'value':'ENV_VALUE'},
         {'name': ENV_VAR_NAME_2, 'value':'ENV_VALUE_2'}]
      external_service - If set to true, enable external Load Balancer

  Returns:
    A Container Manifest as a YAML string.
  """
  # YAML config that we're going to create for both RC & Service
  config = {'resources': []}

  name = context.env['name']
  container_name = context.properties.get('container_name', name)
  namespace = context.properties.get('namespace', 'default')

  # Define things that the Service cares about
  service_name = context.properties.get('service_name', name + '-service')
  service_type = SERVICE_TYPE_COLLECTION

  # Define things that the Replication Controller (rc) cares about
  rc_name = name + '-rc'
  rc_type = RC_TYPE_COLLECTION

  service = {
      'name': service_name,
      'type': service_type,
      'properties': {
          'apiVersion': 'v1',
          'kind': 'Service',
          'namespace': namespace,
          'metadata': {
              'name': service_name,
              'labels': GenerateLabels(context, service_name),
          },
          'spec': {
              'ports': [GenerateServicePorts(context, container_name)],
              'selector': GenerateLabels(context, name)
          }
      }
  }

  set_up_external_lb = context.properties.get('external_service', None)
  if set_up_external_lb:
    service['properties']['spec']['type'] = 'LoadBalancer'
  config['resources'].append(service)

  rc = {
      'name': rc_name,
      'type': rc_type,
      'properties': {
          'apiVersion': 'v1',
          'kind': 'ReplicationController',
          'namespace': namespace,
          'metadata': {
              'name': rc_name,
              'labels': GenerateLabels(context, rc_name),
          },
          'spec': {
              'replicas': context.properties['replicas'],
              'selector': GenerateLabels(context, name),
              'template': {
                  'metadata': {
                      'labels': GenerateLabels(context, name),
                  },
                  'spec': {
                      'containers': [
                          {
                              'env': GenerateEnv(context),
                              'name': container_name,
                              'image': context.properties['image'],
                              'ports': [
                                  {
                                      'name': container_name,
                                      'containerPort': context.properties['container_port'],
                                  }
                              ]
                          }
                      ]
                  }
              }
          }
      }
  }

  config['resources'].append(rc)
  return yaml.dump(config)


# Generates labels either from the context.properties['labels'] or generates
# a default label 'name':name
def GenerateLabels(context, name):
  """Generates labels from context.properties['labels'] or creates default.

  We make a deep copy of the context.properties['labels'] section to avoid
  linking in the yaml document, which I believe reduces readability of the
  expanded template. If no labels are given, generate a default 'name':name.

  Args:
    context: Template context, which can contain the following properties:
      labels - Labels to generate

  Returns:
    A dict containing labels in a name:value format
  """
  tmp_labels = context.properties.get('labels', None)
  ret_labels = {'name': name}
  if isinstance(tmp_labels, dict):
    for key, value in tmp_labels.iteritems():
      ret_labels[key] = value
  return ret_labels


def GenerateServicePorts(context, name):
  """Generates a ports section for a service.

  Args:
    context: Template context, which can contain the following properties:
      service_port - Port to use for the service
      target_port - Target port for the service
      protocol - Protocol to use.

  Returns:
    A dict containing a port definition
  """
  service_port = context.properties.get('service_port', None)
  target_port = context.properties.get('target_port', None)
  protocol = context.properties.get('protocol')
  ports = {}
  if name:
    ports['name'] = name
  if service_port:
    ports['port'] = service_port
  if target_port:
    ports['targetPort'] = target_port
  if protocol:
    ports['protocol'] = protocol
  return ports


def GenerateEnv(context):
  """Generates environmental variables for a pod.

  Args:
    context: Template context, which can contain the following properties:
      env - Environment variables to set.

  Returns:
    A list containing env variables in dict format {name: 'name', value: 'value'}
  """
  env = []
  tmp_env = context.properties.get('env', [])
  for entry in tmp_env:
    if isinstance(entry, dict):
      env.append({'name': entry.get('name'), 'value': entry.get('value')})
  return env
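To see what this module produces, you can call `GenerateConfig` with a minimal stand-in for the template context. This is just a sketch: in practice, the expansion service constructs the context object that carries `env` and `properties`.

```
# Hypothetical stand-in for the template context supplied by the expander.
class MockContext(object):
  def __init__(self, name, properties):
    self.env = {'name': name}
    self.properties = properties

# Expand a replicated service like the Guestbook front end, then print the
# resulting YAML manifest.
print(GenerateConfig(MockContext('frontend', {
    'image': 'gcr.io/google_containers/example-guestbook-php-redis:v3',
    'replicas': 3,
    'service_port': 80,
    'target_port': 80,
    'container_port': 80,
    'external_service': True,
})))
```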
# Bootstrapping Deployment Manager
Welcome to the bootstrap example. The instructions below will step you through
the process of building and running a local instance of DM on your local machine,
and then using it to deploy another instance of DM in your cluster.
This example provides insights into how DM works, and is recommended for anyone
interested in contributing to the project.
## Prerequisites
Before you can bootstrap DM, the following prerequisites must be satisfied.
### Kubernetes cluster and go configuration
1. Make sure your Kubernetes cluster is up and running, and that you can run
`kubectl` commands against it.
1. Clone this repository into the src folder of your GOPATH, if you haven't already.
1. Make sure your PATH contains `$GOPATH/bin`.
### Installing required python packages
Since Deployment Manager uses Python and will be running locally on your
machine, you need to make sure the necessary Python packages are installed. This
step assumes that you have already installed the pip package management system
on your machine.
```
pip install -r expandybird/requirements.txt
```
Note: depending on how you installed python and pip, you may need to use `sudo`
for this command.
## Bootstrapping Deployment Manager
With the prerequisites satisfied, you're ready to bootstrap DM.
### Building and installing the binaries
First, you're going to build and install the DM binaries. You can do this by
running make in the repository root.
```
make
```
### Start Deployment Manager on localhost
Next, start the three DM binaries on localhost using the supplied bootstrap script.
```
./examples/bootstrap/bootstrap.sh
```

It also starts kubectl proxy on port 8001.
### Deploy Deployment Manager into your cluster
Finally, use the DM running on localhost to deploy another instance of DM onto
the cluster using `dm` and the supplied template. Note that you are using the
`--service` flag to point `dm` to the instance of DM running on localhost, rather
than to an instance of DM running in the cluster through `kubectl proxy`, which
is the default.
```
dm --service=http://localhost:8080 deploy examples/bootstrap/bootstrap.yaml
```
You now have Deployment Manager running on your cluster. You can see it running
using `kubectl`, as described in the top level [README.md](../../README.md).
# Guestbook Example
Welcome to the Guestbook example. It shows you how to build and reuse
parameterized templates.
## Prerequisites
First, make sure DM is installed in your Kubernetes cluster and that the
Guestbook example is deployed by following the instructions in the top level
[README.md](../../README.md).
## Understanding the Guestbook example template
Let's take a closer look at the template used by the Guestbook example.
### Replicated services
The typical design pattern for microservices in Kubernetes is to create a
replication controller and a service with the same selector, so that the service
exposes ports from the pods managed by the replication controller.
We have created a parameterized type for this kind of replicated service called
[Replicated Service](../../types/replicatedservice/v1), and we use it three times in this
example.
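For example, for a replicated service named `frontend`, the expanded output pairs the two resources roughly like this (a simplified sketch of what `replicatedservice.py` emits):

```
- name: frontend-service
  type: Service
  properties:
    spec:
      selector:
        name: frontend
- name: frontend-rc
  type: ReplicationController
  properties:
    spec:
      template:
        metadata:
          labels:
            name: frontend
```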
Note that the type is defined by a
[python script](../../types/replicatedservice/v1/replicatedservice.py). It also has a
[schema](../../types/replicatedservice/v1/replicatedservice.py.schema). Schemas are
optional. If present in the type definition, they are used to validate uses of the
type that appear in DM templates.
For more information about types and templates, see the [design document](../../docs/design/design.md).
### The Guestbook application
The Guestbook application consists of 2 microservices: a front end and a Redis cluster.
#### The front end
The front end is a replicated service with 3 replicas:
```
- name: frontend
  type: https://raw.githubusercontent.com/kubernetes/deployment-manager/master/examples/replicatedservice/replicatedservice.py
  properties:
    ...
    image: gcr.io/google_containers/example-guestbook-php-redis:v3
```
(Note that we use the URL for the type replicatedservice.py, not just the type name.)
#### The Redis cluster
The Redis cluster consists of two replicated services: a master with a single replica
and the slaves with 2 replicas. It's defined by [this composite type](../../types/redis/v1/redis.jinja),
which is a [Jinja](http://jinja.pocoo.org/) template with a [schema](../../types/redis/v1/redis.jinja.schema).
```
{% set REDIS_PORT = 6379 %}
{% set WORKERS = properties['workers'] or 2 %}
...
```
### Displaying types
You can see the types you deployed to the cluster using the `deployed-types` command:
```
dm deployed-types
["Service","ReplicationController","redis.jinja","https://raw.githubusercontent.com/kubernetes/deployment-manager/master/examples/replicatedservice/replicatedservice.py"]
```
This output shows 2 primitive types (Service and ReplicationController), and 2
composite types (redis.jinja and one imported from github (replicatedservice.py)).
You can also see where a specific type is being used with the `deployed-instances` command:
```
dm deployed-instances Service
[{"name":"frontend-service","type":"Service","deployment":"guestbook4","manifest":"manifest-1446682551242763329","path":"$.resources[0].resources[0]"},{"name":"redis-master","type":"Service","deployment":"guestbook4","manifest":"manifest-1446682551242763329","path":"$.resources[1].resources[0].resources[0]"},{"name":"redis-slave","type":"Service","deployment":"guestbook4","manifest":"manifest-1446682551242763329","path":"$.resources[1].resources[1].resources[0]"}]
```
This output describes the deployment and manifest, as well as the JSON paths to
the instances of the type within the layout.
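For example, the path `$.resources[0].resources[0]` above points at the `frontend-service` resource expanded inside the first top-level resource of the guestbook layout, which is shaped roughly like this (a sketch):

```
resources:
- name: frontend
  type: replicatedservice.py
  resources:
  - name: frontend-service
    type: Service
- name: redis
  type: redis.jinja
  resources:
  - ...
```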
For more information about deployments, manifests and layouts, see the [design document](../../docs/design/design.md).
{% set REDIS_PORT = 6379 %}
{% set WORKERS = properties['workers'] or 2 %}
resources:
- name: redis-master
  type: https://raw.githubusercontent.com/kubernetes/deployment-manager/master/examples/replicatedservice/replicatedservice.py
  properties:
    # This has to be overwritten since service names are hard coded in the code
    service_name: redis-master
    service_port: {{ REDIS_PORT }}
    target_port: {{ REDIS_PORT }}
    container_port: {{ REDIS_PORT }}
    replicas: 1
    container_name: master
    image: redis
- name: redis-slave
  type: https://raw.githubusercontent.com/kubernetes/deployment-manager/master/examples/replicatedservice/replicatedservice.py
  properties:
    # This has to be overwritten since service names are hard coded in the code
    service_name: redis-slave
    service_port: {{ REDIS_PORT }}
    container_port: {{ REDIS_PORT }}
    replicas: {{ WORKERS }}
    container_name: worker
    image: kubernetes/redis-slave:v2
    # An example of how to specify env variables.
    env:
    - name: GET_HOSTS_FROM
      value: env
    - name: REDIS_MASTER_SERVICE_HOST
      value: redis-master
info:
  title: Redis cluster
  description: Defines a redis cluster, using a single replica
    replicatedservice for master and replicatedservice for workers.

properties:
  workers:
    type: int
    default: 2
    description: Number of worker replicas.
"""Defines a ReplicatedService type by creating both a Service and an RC.
This module creates a typical abstraction for running a service in a
Kubernetes cluster, namely a replication controller and a service packaged
together into a single unit.
"""
import yaml
SERVICE_TYPE_COLLECTION = 'Service'
RC_TYPE_COLLECTION = 'ReplicationController'
def GenerateConfig(context):
  """Generates a Replication Controller and a matching Service.

  Args:
    context: Template context. See schema for context properties.

  Returns:
    A Container Manifest as a YAML string.
  """
  # YAML config that we're going to create for both RC & Service
  config = {'resources': []}

  name = context.env['name']
  container_name = context.properties.get('container_name', name)
  namespace = context.properties.get('namespace', 'default')

  # Define things that the Service cares about
  service_name = context.properties.get('service_name', name + '-service')
  service_type = SERVICE_TYPE_COLLECTION

  # Define things that the Replication Controller (rc) cares about
  rc_name = name + '-rc'
  rc_type = RC_TYPE_COLLECTION

  service = {
      'name': service_name,
      'type': service_type,
      'properties': {
          'apiVersion': 'v1',
          'kind': 'Service',
          'namespace': namespace,
          'metadata': {
              'name': service_name,
              'labels': GenerateLabels(context, service_name),
          },
          'spec': {
              'ports': [GenerateServicePorts(context, container_name)],
              'selector': GenerateLabels(context, name)
          }
      }
  }

  set_up_external_lb = context.properties.get('external_service', None)
  if set_up_external_lb:
    service['properties']['spec']['type'] = 'LoadBalancer'
  config['resources'].append(service)

  rc = {
      'name': rc_name,
      'type': rc_type,
      'properties': {
          'apiVersion': 'v1',
          'kind': 'ReplicationController',
          'namespace': namespace,
          'metadata': {
              'name': rc_name,
              'labels': GenerateLabels(context, rc_name),
          },
          'spec': {
              'replicas': context.properties['replicas'],
              'selector': GenerateLabels(context, name),
              'template': {
                  'metadata': {
                      'labels': GenerateLabels(context, name),
                  },
                  'spec': {
                      'containers': [
                          {
                              'env': GenerateEnv(context),
                              'name': container_name,
                              'image': context.properties['image'],
                              'ports': [
                                  {
                                      'name': container_name,
                                      'containerPort': context.properties['container_port'],
                                  }
                              ]
                          }
                      ]
                  }
              }
          }
      }
  }

  config['resources'].append(rc)
  return yaml.dump(config)


# Generates labels either from the context.properties['labels'] or generates
# a default label 'name':name
def GenerateLabels(context, name):
  """Generates labels from context.properties['labels'] or creates default.

  We make a deep copy of the context.properties['labels'] section to avoid
  linking in the yaml document, which I believe reduces readability of the
  expanded template. If no labels are given, generate a default 'name':name.

  Args:
    context: Template context, which can contain the following properties:
      labels - Labels to generate

  Returns:
    A dict containing labels in a name:value format
  """
  tmp_labels = context.properties.get('labels', None)
  ret_labels = {'name': name}
  if isinstance(tmp_labels, dict):
    for key, value in tmp_labels.iteritems():
      ret_labels[key] = value
  return ret_labels


def GenerateServicePorts(context, name):
  """Generates a ports section for a service.

  Args:
    context: Template context, which can contain the following properties:
      service_port - Port to use for the service
      target_port - Target port for the service
      protocol - Protocol to use.

  Returns:
    A dict containing a port definition
  """
  service_port = context.properties.get('service_port', None)
  target_port = context.properties.get('target_port', None)
  protocol = context.properties.get('protocol')
  ports = {}
  if name:
    ports['name'] = name
  if service_port:
    ports['port'] = service_port
  if target_port:
    ports['targetPort'] = target_port
  if protocol:
    ports['protocol'] = protocol
  return ports


def GenerateEnv(context):
  """Generates environmental variables for a pod.

  Args:
    context: Template context, which can contain the following properties:
      env - Environment variables to set.

  Returns:
    A list containing env variables in dict format {name: 'name', value: 'value'}
  """
  env = []
  tmp_env = context.properties.get('env', [])
  for entry in tmp_env:
    if isinstance(entry, dict):
      env.append({'name': entry.get('name'), 'value': entry.get('value')})
  return env
info:
  title: Replicated Service
  description: |
    Defines a ReplicatedService type by creating both a Service and an RC.

    This module creates a typical abstraction for running a service in a
    Kubernetes cluster, namely a replication controller and a service packaged
    together into a single unit.

required:
- image

properties:
  container_name:
    type: string
    description: Name to use for container. If omitted, name is used.
  service_name:
    type: string
    description: Name to use for service. If omitted, name-service is used.
  namespace:
    type: string
    description: Namespace to create resources in. If omitted, 'default' is used.
    default: default
  protocol:
    type: string
    description: Protocol to use for the service.
  service_port:
    type: int
    description: Port to use for the service.
  target_port:
    type: int
    description: Target port to use for the service.
  container_port:
    type: int
    description: Port to use for the container.
  replicas:
    type: int
    description: Number of replicas to create in RC.
  image:
    type: string
    description: Docker image to use for replicas.
  labels:
    type: object
    description: Labels to apply.
  env:
    type: object
    description: Environment variables to apply.
    properties:
      name:
        type: string
      value:
        type: string
  external_service:
    type: boolean
    description: If set to true, enable external load balancer.
func TestServiceWrapper(t *testing.T) {
	backend := expander.NewExpander("../expansion/expansion.py")
	wrapper := NewService(NewExpansionHandler(backend))
	container := restful.NewContainer()
	container.ServeMux = http.NewServeMux()
	wrapper.Register(container)
	handlerTester := util.NewHandlerTester(container)
	for _, swtc := range ServiceWrapperTestCases {
		reader := GetTemplateReader(t, swtc.Description, inputFileName)
		// ...
}
var malformedExpansionOutput = []byte(`
this: is: invalid: yaml:
`)
type mockExpander struct {
	// ...
}
// expanded configuration as a string on success.
func (e *mockExpander) ExpandTemplate(template *expander.Template) (string, error) {
	switch template.Name {
	case "InvalidFileName.yaml":
		return "", fmt.Errorf("expansion error")
	case "InvalidTypeName.yaml":
		return string(malformedExpansionOutput), nil
	}
	// ...
}

func TestExpansionHandler(t *testing.T) {
	// ...
	wrapper := NewService(NewExpansionHandler(backend))
	container := restful.DefaultContainer
	wrapper.Register(container)
	handlerTester := util.NewHandlerTester(container)
	for _, ehtc := range ExpansionHandlerTestCases {
		reader := GetTemplateReader(t, ehtc.Description, ehtc.TemplateFileName)
# limitations under the License.
######################################################################
imports:
- path: redis.jinja
- path: replicatedservice.py
resources:
- name: frontend
  type: replicatedservice.py
  properties:
    service_port: 80
    container_port: 80
    external_service: true
    replicas: 3
    image: gcr.io/google_containers/example-guestbook-php-redis:v3
- name: redis
  type: redis.jinja
  properties: null
root: .