Reference: https://discuss.hashicorp.com/t/escape-characters-recognized-as-a-variable-in-template-stanza/40525
When performing Ops work through Nomad, running Ansible with the raw_exec driver in a sysbatch-type job lets you carry out the work on every node at once.
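A minimal sketch of that pattern, assuming ansible-playbook is already installed on every client node (the playbook content below is a hypothetical example):
job "ops-ansible" {
  datacenters = ["dc1"]
  # sysbatch schedules one run of this task on every eligible client node
  type = "sysbatch"
  group "ansible" {
    task "playbook" {
      # raw_exec runs directly on the host with no isolation
      driver = "raw_exec"
      config {
        command = "ansible-playbook"
        # apply the rendered playbook to the local node
        args    = ["-i", "localhost,", "-c", "local", "${NOMAD_TASK_DIR}/site.yml"]
      }
      template {
        data        = <<EOF
- hosts: localhost
  tasks:
    - name: example ops task
      command: uptime
EOF
        destination = "local/site.yml"
      }
    }
  }
}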
# nomad var put {path-based variables} key=value
$ nomad var put code/config password=password
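The stored value can be read back with nomad var get, and (on Nomad 1.4+) referenced from a task template through the nomadVar function; a sketch:
# read the variable back (needs an ACL token with read access to the path)
$ nomad var get code/config

template {
  data        = <<EOF
password={{ with nomadVar "code/config" }}{{ .password }}{{ end }}
EOF
  destination = "secrets/app.env"
  env         = true
}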
Nomad
- Java Driver : https://developer.hashicorp.com/nomad/docs/drivers/java
- Scheduler Config : https://developer.hashicorp.com/nomad/api-docs/operator/scheduler
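For reference, a minimal java driver task might look like the sketch below (the jar path is a hypothetical example), and the scheduler configuration can be read from the API:
task "app" {
  driver = "java"
  config {
    jar_path    = "local/app.jar"
    jvm_options = ["-Xms256m", "-Xmx256m"]
  }
}

$ curl -s http://127.0.0.1:4646/v1/operator/scheduler/configuration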
job "nexus" {
datacenters = ["dc1"]
group "nexus" {
count = 1
network {
port "http" {
to = 8081
static = 8081
}
}
task "nexus" {
driver = "docker"
config {
image = "sonatype/nexus3"
ports = ["http"]
}
env {
INSTALL4J_ADD_VM_PARAMS = "-Xms2703m -Xmx2703m -XX:MaxDirectMemorySize=2703m -Djava.util.prefs.userRoot=/some-other-dir"
}
resources {
cpu = 1000
memory = 2703
}
}
}
}
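Once the allocation is healthy, the initial admin password can be read out of the container; the sonatype/nexus3 image writes it to /nexus-data/admin.password:
$ nomad alloc exec -task nexus <alloc-id> cat /nexus-data/admin.password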
job "nginx" {
datacenters = ["dc1"]
group "nginx" {
constraint {
attribute = "${attr.unique.hostname}"
value = "slave0"
}
#Assumes Vault TLS certs exist on the host and this host volume is declared in the Nomad client HCL file (see the client-side sketch after this job)
volume "cert-data" {
type = "host"
source = "cert-data"
read_only = false
}
#Gave the lucky number 7 four times, hoping it runs without failure
network {
port "http" {
to = 7777
static = 7777
}
}
service {
name = "nginx"
port = "http"
}
task "nginx" {
driver = "docker"
volume_mount {
volume = "cert-data"
destination = "/usr/local/cert"
}
config {
image = "nginx"
ports = ["http"]
volumes = [
"local:/etc/nginx/conf.d",
]
}
template {
data = <<EOF
#Vault has a single active server; every other server is standby.
#Writes must hit the active server only, so the service is pulled from Consul as "active.vault" below.
upstream backend {
{{ range service "active.vault" }}
server {{ .Address }}:{{ .Port }};
{{ else }}server 127.0.0.1:65535; # force a 502
{{ end }}
}
server {
listen 7777 ssl;
#Uses the certs from the Nomad host volume mounted above
ssl_certificate /usr/local/cert/vault/global-client-vault-0.pem;
ssl_certificate_key /usr/local/cert/vault/global-client-vault-0-key.pem;
#The Vault UI redirects / to /ui, so no location other than / will work.
location / {
proxy_pass https://backend;
}
}
EOF
destination = "local/load-balancer.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
resources {
cpu = 100
memory = 201
}
}
}
}
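The cert-data host volume mounted above must be declared on the client side; a minimal sketch of the Nomad client HCL (the host path is a hypothetical example):
client {
  enabled = true
  host_volume "cert-data" {
    path      = "/opt/cert-data"
    read_only = false
  }
}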
job "elk" {
datacenters = ["dc1"]
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
update {
stagger = "10s"
max_parallel = 1
}
group "elk" {
count = 1
restart {
attempts = 2
interval = "1m"
delay = "15s"
mode = "delay"
}
network {
port "elastic" {
to = 9200
static = 9200
}
port "kibana" {
to = 5601
}
port "logstash" {
to = 5000
}
}
task "elasticsearch" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
value = "slave2"
}
config {
image = "elasticsearch:7.16.2"
ports = ["elastic"]
volumes = [
"local/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml",
]
}
template {
data = <<EOF
cluster.name: "my-cluster"
network.host: 0.0.0.0
discovery.type: single-node
discovery.seed_hosts: ["127.0.0.1"]
xpack.security.enabled: true
xpack.license.self_generated.type: trial
xpack.monitoring.collection.enabled: true
EOF
destination = "local/elasticsearch.yml"
change_mode = "signal"
change_signal = "SIGHUP"
}
env {
ELASTIC_PASSWORD = "elastic"
}
service {
provider = "nomad"
name = "${TASKGROUP}-elasticsearch"
port = "elastic"
}
resources {
cpu = 500
memory = 2048
}
}
task "kibana" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
value = "slave2"
}
config {
image = "kibana:7.16.2"
ports = ["kibana"]
volumes = [
"local/kibana.yml:/usr/share/kibana/config/kibana.yml"
]
}
template {
data = <<EOF
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://{{ env "NOMAD_IP_elastic" }}:{{ env "NOMAD_PORT_elastic" }}" ]
elasticsearch.username: elastic
elasticsearch.password: elastic
EOF
destination = "local/kibana.yml"
change_mode = "signal"
change_signal = "SIGHUP"
}
service {
name = "${TASKGROUP}-kibana"
port = "kibana"
provider = "nomad"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
resources {
cpu = 500
memory = 1200
}
}
task "logstash" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
value = "slave2"
}
config {
image = "logstash:7.16.2"
ports = ["logstash"]
volumes = [
"local/logstash.yml:/usr/share/logstash/config/logstash.yml"
]
}
template {
data = <<EOF
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://{{ env "NOMAD_IP_elastic" }}:{{ env "NOMAD_PORT_elastic" }}" ]
EOF
destination = "local/logstash.yml"
change_mode = "signal"
change_signal = "SIGHUP"
}
service {
name = "${TASKGROUP}-logstash"
port = "logstash"
provider = "nomad"
}
resources {
cpu = 200
memory = 1024
}
}
}
}
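Because these services are registered with provider = "nomad", other tasks can discover them in a template with the nomadService function (Nomad 1.3+); a sketch:
template {
  data        = <<EOF
{{ range nomadService "elk-elasticsearch" }}
elasticsearch.hosts: [ "http://{{ .Address }}:{{ .Port }}" ]
{{ end }}
EOF
  destination = "local/discovered.yml"
}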
job "logs" {
datacenters = ["dc1"]
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
update {
stagger = "10s"
max_parallel = 1
}
group "elk" {
count = 1
restart {
attempts = 2
interval = "1m"
delay = "15s"
mode = "delay"
}
network {
port "elk" {
to = 9200
static = 9200
}
port "kibana" {
to = 5601
}
port "logstash" {
to = 5000
}
}
task "elasticsearch" {
driver = "docker"
vault {
policies = ["admin"]
change_mode = "signal"
change_signal = "SIGINT"
}
config {
image = "elasticsearch:7.16.2"
ports = ["elk"]
volumes = [
"local/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml",
]
}
template {
data = <<EOF
cluster.name: "my-cluster"
network.host: 0.0.0.0
discovery.type: single-node
discovery.seed_hosts: ["127.0.0.1"]
xpack.security.enabled: true
xpack.license.self_generated.type: trial
xpack.monitoring.collection.enabled: true
EOF
destination = "local/elasticsearch.yml"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOH
ELASTIC_PASSWORD="{{with secret "secret/elastic"}}{{.Data.passwd}}{{end}}"
EOH
destination = "secrets/file.env"
env = true
}
service {
name = "${TASKGROUP}-elasticsearch"
port = "elk"
}
resources {
cpu = 500
memory = 1048
}
}
task "kibana" {
driver = "docker"
vault {
policies = ["admin"]
change_mode = "signal"
change_signal = "SIGINT"
}
config {
image = "kibana:7.16.2"
ports = ["kibana"]
volumes = [
"local/kibana.yml:/usr/share/kibana/config/kibana.yml"
]
}
template {
data = <<EOF
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://{{ env "NOMAD_IP_elk" }}:{{ env "NOMAD_PORT_elk" }}" ]
elasticsearch.username: elastic
{{ with secret "secret/elastic" }}
elasticsearch.password: {{.Data.passwd}}
{{ end }}
EOF
destination = "local/kibana.yml"
change_mode = "signal"
change_signal = "SIGHUP"
}
service {
name = "${TASKGROUP}-kibana"
port = "kibana"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
resources {
cpu = 500
memory = 1200
}
}
task "logstash" {
driver = "docker"
config {
image = "logstash:7.16.2"
ports = ["logstash"]
volumes = [
"local/logstash.yml:/usr/share/logstash/config/logstash.yml"
]
}
template {
data = <<EOF
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://{{ env "NOMAD_IP_elk" }}:{{ env "NOMAD_PORT_elk" }}" ]
EOF
destination = "local/logstash.yml"
change_mode = "signal"
change_signal = "SIGHUP"
}
service {
name = "${TASKGROUP}-logstash"
port = "logstash"
}
resources {
cpu = 200
memory = 1024
}
}
}
}
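The vault stanza above requests the admin policy, but all the templates actually need is read access to secret/elastic; a minimal sketch of such a policy:
# elastic-read.hcl
path "secret/elastic" {
  capabilities = ["read"]
}

$ vault policy write elastic-read elastic-read.hcl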
job "24-paramete" {
datacenters = ["dc1"]
type = "batch"
parameterized {
payload = "forbidden"
meta_required = ["room_num"]
}
group "run-main-job" {
task "run-main-job" {
driver = "raw_exec"
config {
command = "nomad"
# arguments
args = ["job", "run", "${NOMAD_TASK_DIR}/room.job" ]
}
template {
data = <<EOH
#####################
job "{{ env "NOMAD_META_room_num" }}" {
datacenters = ["dc1"]
group "jboss" {
network {
port "http" {
to = "8080"
}
}
service {
port = "http"
provider = "nomad"
name = "{{ env "NOMAD_META_room_num" }}"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
task "http" {
driver = "docker"
config {
image = "jboss/wildfly"
ports = ["http"]
}
resources {
cpu = 500
memory = 282
}
}
}
}
EOH
destination = "local/room.job"
}
}
}
}
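The parameterized job does nothing until dispatched; each dispatch passes a room_num (the value below is a hypothetical example) that becomes the child job and service name:
$ nomad job dispatch -meta room_num=room-101 24-paramete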
job "22-fastapi" {
datacenters = ["dc1"]
group "fastapi" {
network {
mode = "bridge"
#The service comes up on port 80; to run a service that uses a different port, change only the image and "to"
port "http" {
to = 80
}
}
service {
name = "fastapi"
#Using the predeclared "http" label here would pick up a dynamic port,
#and the ingress gateway cannot read dynamic ports.
port = "80"
connect {
sidecar_service{}
}
}
task "fastapi" {
driver = "docker"
config {
image = "tiangolo/uvicorn-gunicorn-fastapi"
ports = ["http"]
}
resources {
cpu = 500
memory = 282
}
}
scaling {
enabled = true
min = 1
max = 3
policy {
evaluation_interval = "5s"
cooldown = "1m"
#driver = "nomad-apm"
check "mem_allocated_percentage" {
source = "nomad-apm"
query = "max_memory"
strategy "target-value" {
target = 80
}
}
}
}
}
}
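The ingress gateway mentioned in the comments could be wired to the fastapi service like this; a minimal Consul Connect ingress sketch (the listener port 8080 is a hypothetical choice):
job "ingress" {
  datacenters = ["dc1"]
  group "ingress" {
    network {
      mode = "bridge"
      port "inbound" {
        static = 8080
        to     = 8080
      }
    }
    service {
      name = "ingress-gateway"
      port = "8080"
      connect {
        gateway {
          ingress {
            listener {
              port     = 8080
              protocol = "tcp"
              service {
                name = "fastapi"
              }
            }
          }
        }
      }
    }
  }
}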
image info : https://quay.io/repository/wildfly/wildfly?tab=info
github : https://github.com/jboss-dockerfiles/wildfly
wildfly docker example : http://www.mastertheboss.com/soa-cloud/docker/deploying-applications-on-your-docker-wildfly-image/