Initial commit.
parent 193ee656f8
commit 17d579a6eb
18 changed files with 797 additions and 0 deletions
11 .gitignore vendored
@@ -0,0 +1,11 @@
.*.sw*
.terraform*
terraform.tfstate*
privkey.pem
inventory.yaml
config.mk
terraform.mk
terraform.tfstate*
config.tf
privkey
pubkey

36 Makefile Normal file
@@ -0,0 +1,36 @@

define CONFIG_MSG =

You do not have a config.mk file.

Please run "./configure" or copy `config.mk.in` to `config.mk` and edit its settings

endef


default: terraform ansible

terraform ansible: config.mk
	@$(MAKE) -C $@

ssh: config.mk
	$(MAKE) -C ansible ssh

#ansible:
#	@$(MAKE) -C ansible

config.mk:
	@ $(info $(CONFIG_MSG) )
	@ exit 1

clean:
	rm -f config.mk
	@$(MAKE) -C terraform clean
	@$(MAKE) -C ansible clean


.PHONY: terraform ansible

11 README.md
@@ -0,0 +1,11 @@
The intent here is to create an all-in-one social server build: Terraform provisions the AWS infrastructure and Ansible configures the services on it.

## Requirements

* GNU Make
* Ansible
* Terraform
* AWS CLI
* AWS SessionManager plugin (http://docs.aws.amazon.com/console/systems-manager/session-manager-plugin-not-found)

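A minimal quick start follows from the top-level Makefile and the configure script; this sequence is a sketch (the targets exist above, but this commit ships no usage docs):

    ./configure    # prompts for each setting in config.mk.in and writes config.mk
    make           # runs the terraform build, then the ansible build
    make ssh       # opens an SSH-over-SSM session to the provisioned instance
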
61 ansible/Makefile Normal file
@@ -0,0 +1,61 @@

include ../config.mk
include ../terraform/terraform.mk

# XXX parameterize
AWS_REGION = us-west-2

SSH := ssh -o "StrictHostKeyChecking=no" -o UserKnownHostsFile=/dev/null -o ProxyCommand="sh -c \"aws --region $(AWS_REGION) ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\"" -i ../terraform/privkey -l ubuntu

default: ansible

ansible: toolcheck inventory.yaml
	timeout --foreground 300 bash -c -- 'until $(SSH) $(INSTANCE_ID) "/bin/true"; do sleep 0.5; done'
	export ANSIBLE_NOCOWS=1; ansible-playbook -i inventory.yaml --private-key ../terraform/privkey -l social site.yaml

ssh: inventory.yaml
	$(SSH) $(INSTANCE_ID)

inventory.yaml: inventory.tmpl.yaml sedline
	sed $(SEDLINE) inventory.tmpl.yaml > inventory.yaml

SEDLINE =

sedline: terraform_sedline config_sedline

config_sedline: $(addprefix __sed_,$(shell grep '^[0-9A-Z_]' ../config.mk | awk '{print $$1}'))

terraform_sedline: $(addprefix __sed_,$(shell grep '^[0-9A-Z_]' ../terraform/terraform.mk | awk '{print $$1}'))

__sed_%:
	$(eval SEDLINE := $$(SEDLINE) -e 's/{{$*}}/$($*)/')


#
#TF_OUTPUTS =
#
#tf_outputs: ../terraform/terraform.tfstate
#

# FIXME: DRY this target

CHECK_TOOLS = ansible

toolcheck:
	@echo
	@echo "Checking applications..."
	@ FAIL=""; \
	for TOOL in $(CHECK_TOOLS); do \
		which $${TOOL} >/dev/null || FAIL="$${FAIL} $${TOOL}"; \
	done; \
	if test -n "$${FAIL}"; then \
		echo "ERROR: You are missing the following:$${FAIL}"; \
		echo "Please make sure all necessary tools are installed and available in your path"; \
		echo; \
		exit 1; \
	fi

	@echo

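For reference, the `__sed_%` pattern rule above accumulates one `-e` expression into SEDLINE per variable found in the two .mk files, so the inventory render amounts to something like the following (values are illustrative; the instance ID is made up):

    sed -e 's/{{AWS_REGION}}/us-west-2/' \
        -e 's/{{MASTODON_SIDEKIQ_COUNT}}/2/' \
        -e 's/{{INSTANCE_ID}}/i-0123456789abcdef0/' \
        inventory.tmpl.yaml > inventory.yaml
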
13 ansible/inventory.tmpl.yaml Normal file
@@ -0,0 +1,13 @@
social:
  hosts:
    social:
      ansible_host: "{{INSTANCE_ID}}"
      ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand="sh -c \"aws --region {{AWS_REGION}} ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\""
      ansible_user: ubuntu
      hostname: social
  vars:
    ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ProxyCommand="sh -c \"aws --region {{AWS_REGION}} ssm start-session --target %h --document-name AWS-StartSSHSession --parameters portNumber=%p\""
    public_ip: "{{PUBLIC_IP}}"
    mastodon_sidekiq_count: {{MASTODON_SIDEKIQ_COUNT}}
    mastodon_sidekiq_threads: {{MASTODON_SIDEKIQ_THREADS}}

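Note that the {{UPPER_CASE}} placeholders in this template are substituted by the sed pipeline in ansible/Makefile before Ansible ever parses the file; the {{ hostname }}-style expressions in the role tasks below are ordinary Ansible/Jinja variables and are left untouched by that pass.
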
6 ansible/roles/common/handlers/main.yml Normal file
@@ -0,0 +1,6 @@
---

- name: reboot
  reboot:
    reboot_timeout: 3600

67 ansible/roles/common/tasks/main.yaml Normal file
@@ -0,0 +1,67 @@
---

# configure system

- name: Configure hostname
  copy:
    content: "{{ hostname }}"
    dest: /etc/hostname
  notify: reboot
  # yep we reboot for this

- name: hostname in hosts
  lineinfile:
    path: /etc/hosts
    regexp: "^127.0.0.1"
    line: "127.0.0.1 {{ hostname }} localhost"

- name: Set timezone
  file:
    src: /usr/share/zoneinfo/America/Los_Angeles
    dest: /etc/localtime
    state: link
  notify: reboot

- name: Set keyboard
  lineinfile:
    path: /etc/default/keyboard
    regexp: '^XKBLAYOUT='
    line: 'XKBLAYOUT="us"'
  notify: reboot

- name: Shaboom!!!
  apt:
    update_cache: yes
    upgrade: dist
    force_apt_get: yes
  retries: 2
  delay: 10

- name: install base apps
  apt:
    force_apt_get: yes
    name:
      - vim
      - less
      - tmux
      - telnet
      - ntp
      - lsof

- name: edit bashrc
  blockinfile:
    path: /etc/bash.bashrc
    marker: "### {mark} ANSIBLE MANAGED BLOCK {{ item.name }} ###"
    block: "{{ item.block }}"
  with_items:
    - name: prompt
      block: |
        if [[ $USER == 'root' ]]; then
          PS1='${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;33m\]\w\[\033[00m\]# '
        else
          PS1='${debian_chroot:+($debian_chroot)}\[\033[01;36m\]\u@\h\[\033[00m\]:\[\033[01;32m\]\w\[\033[00m\]\$ '
        fi
    - name: lscolor
      block: |
        alias ls='ls --color=auto'

28 ansible/roles/mastodon/tasks/main.yaml Normal file
@@ -0,0 +1,28 @@
---

- name: install base apps
  apt:
    force_apt_get: yes
    name:
      - docker-compose-v2
      - git

- name: Mastodon path
  file:
    path: "/srv/mastodon"
    state: directory
    recurse: true

- name: mastodon source
  git:
    repo: "https://tea.entar.net/teh/mastodon.git"
    dest: /srv/mastodon/src

- name: mastodon docker-compose
  template:
    src: templates/docker-compose.mastodon.yaml
    dest: /srv/mastodon/docker-compose.yaml

# add env file

166 ansible/roles/mastodon/templates/docker-compose.mastodon.yaml Normal file
@@ -0,0 +1,166 @@
version: '3'
services:
  mastodon_db:
    container_name: mastodon_db
    restart: always
    image: postgres:14-alpine
    shm_size: 256mb
    networks:
      - mastodon
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres']
    volumes:
      - ./postgres14:/var/lib/postgresql/data
    env_file: .env.production
    environment:
      - 'POSTGRES_HOST_AUTH_METHOD=trust'
    command: postgres -c 'max_connections={{mastodon_sidekiq_count * mastodon_sidekiq_threads + 50}}'

  mastodon_redis:
    container_name: mastodon_redis
    restart: always
    image: redis:7-alpine
    networks:
      - mastodon
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
    volumes:
      - ./redis:/data

#  es:
#    restart: always
#    image: docker.elastic.co/elasticsearch/elasticsearch:7.17.4
#    environment:
#      - "ES_JAVA_OPTS=-Xms512m -Xmx512m -Des.enforce.bootstrap.checks=true"
#      - "xpack.license.self_generated.type=basic"
#      - "xpack.security.enabled=false"
#      - "xpack.watcher.enabled=false"
#      - "xpack.graph.enabled=false"
#      - "xpack.ml.enabled=false"
#      - "bootstrap.memory_lock=true"
#      - "cluster.name=es-mastodon"
#      - "discovery.type=single-node"
#      - "thread_pool.write.queue_size=1000"
#    networks:
#      - mastodon
#      - nginx
#    healthcheck:
#      test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
#    volumes:
#      - ./elasticsearch:/usr/share/elasticsearch/data
#    ulimits:
#      memlock:
#        soft: -1
#        hard: -1
#      nofile:
#        soft: 65536
#        hard: 65536
#    ports:
#      - '127.0.0.1:9200:9200'

  mastodon_web:
    container_name: mastodon_web
    build: src
    image: ghcr.io/mastodon/mastodon:v4.2.1
    restart: always
    env_file: .env.production
    command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"
    networks:
      - mastodon
      - nginx
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:3000/health || exit 1']
    ports:
      - '127.0.0.1:3000:3000'
    depends_on:
      - mastodon_db
      - mastodon_redis
#      - es
    volumes:
      - ./public/system:/mastodon/public/system

  mastodon_streaming:
    container_name: mastodon_streaming
    build: src
    image: ghcr.io/mastodon/mastodon:v4.2.1
    restart: always
    env_file: .env.production
    environment:
      - PORT=5000
    command: node ./streaming
    networks:
      - mastodon
      - nginx
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:5000/api/v1/streaming/health || exit 1']
    ports:
      - '127.0.0.1:5000:5000'
    depends_on:
      - mastodon_db
      - mastodon_redis

{% for i in range(mastodon_sidekiq_count) %}
  mastodon_sidekiq_{{i}}:
    container_name: mastodon_sidekiq_{{i}}
    build: src
    image: ghcr.io/mastodon/mastodon:v4.2.1
    restart: always
    env_file: .env.production
    environment:
      - DB_POOL={{ mastodon_sidekiq_threads }}
    command: bundle exec sidekiq -c {{ mastodon_sidekiq_threads }}
    depends_on:
      - mastodon_db
      - mastodon_redis
    networks:
      - mastodon
      - nginx
    volumes:
      - ./public/system:/mastodon/public/system
    healthcheck:
      test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]

{% endfor %}

## Uncomment to enable federation with tor instances along with adding the following ENV variables
## http_proxy=http://privoxy:8118
## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
#  tor:
#    image: sirboops/tor
#    networks:
#      - external_network
#      - internal_network
#
#  privoxy:
#    image: sirboops/privoxy
#    volumes:
#      - ./priv-config:/opt/config
#    networks:
#      - external_network
#      - internal_network

  statsd:
    image: prom/statsd-exporter
    container_name: mastodon_statsd
    restart: always
    ports:
      - 0.0.0.0:9102:9102
    command:
      "--statsd.mapping-config=/statsd-mapping.yaml"
    volumes:
      - ./statsd-mapping.yaml:/statsd-mapping.yaml
    networks:
      - mastodon


networks:
  mastodon:
    ipam:
      driver: default
      config:
        - subnet: 172.42.0.0/16
  nginx:
    external: true

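For scale: with the defaults shipped in config.mk.in below (MASTODON_SIDEKIQ_COUNT = 2, MASTODON_SIDEKIQ_THREADS = 100), the templated Postgres limit works out to 2 * 100 + 50 = 250 max_connections, one connection per sidekiq thread plus a 50-connection margin for the web and streaming containers.
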
12 ansible/site.yaml Normal file
@@ -0,0 +1,12 @@
---

- name: apply common config
  hosts: all
  roles:
    - { role: common, become: yes }

- name: mastodon instance
  hosts: social
  roles:
    - { role: mastodon, become: yes }

36 config.mk.in Normal file
@@ -0,0 +1,36 @@

## config.mk.in
##
## Template for configuration.
## The comment preceding any variable is printed as its prompt.

# Right now AWS is the only option. This is only here for future use.

# AWS: Are we using AWS? 1 means yes, 0 means no (only 1 works right now!)
#AWS = 1


# AWS_REGION: what region is all this stuff going in?
AWS_REGION = us-west-2

# Instance type is configured for a single instance only

# AWS_SINGLE_INSTANCE_TYPE: What size instance should we be using
AWS_SINGLE_INSTANCE_TYPE = t4g.small

# Paste (one-line!) the root SSH public key for the AWS instance, or leave blank to generate new private/public keys
AWS_INSTANCE_PUBLIC_KEY =

# What is the DNS subdomain to be delegated for these services? Leave blank to skip.
AWS_ROUTE53_ZONE =


# TODO: more detailed sidekiq tuning per https://thomas-leister.de/en/scaling-up-mastodon/

# How many sidekiq containers should Mastodon have?
MASTODON_SIDEKIQ_COUNT = 2

# How many threads in each sidekiq container?
MASTODON_SIDEKIQ_THREADS = 100

62 configure vendored Executable file
@@ -0,0 +1,62 @@
#!/bin/bash


# I acknowledge that this is weird. Autoconf/automake are just too heavy for this task.

echo "Configuring. Please answer the following questions:"

# Read through config.mk.in. Every time a variable pops up, emit the comment
# before it and prompt the user for a value, using the setting in config.mk.in as a default

TEMPFILE=.config.temp
declare -A CONFIG

COMMENT=""

echo -n > $TEMPFILE

while read LINE <&4; do
    if echo "$LINE" | grep -q '^#'; then
        COMMENT=$(echo "$LINE" | sed -e 's/^#\+ *//')
    elif echo "$LINE" | grep -q '^[0-9A-Z_]\+ *= *'; then

        VARNAME=$(echo "$LINE" | sed -e 's/ *=.*//')
        DEFAULT=$(echo "$LINE" | sed -e 's/^[0-9A-Z_]\+ *= *//')

        # if there are prefix vars that are false, we need to skip
        SKIP=0
        for K in "${!CONFIG[@]}"; do
            if echo "$VARNAME" | grep -q "^${K}"; then
                if [[ ${CONFIG[$K]} -eq 0 ]]; then
                    SKIP=1
                    break
                fi
            fi
        done

        if [[ ${SKIP} -eq 0 ]]; then
            echo
            echo "$COMMENT"
            #echo "$LINE"
            echo "(default: ${DEFAULT})"
            echo -n "> "
            read VALUE

            if [[ -z $VALUE ]]; then
                VALUE="${DEFAULT}"
            fi

            CONFIG[${VARNAME}]="${VALUE}"

            echo "# ${COMMENT}" >> $TEMPFILE
            echo "${VARNAME} = ${VALUE}" >> $TEMPFILE
            echo >> $TEMPFILE
        fi
    fi
done 4< <(cat config.mk.in)

echo
echo "All done! Putting your configuration into config.mk"

mv $TEMPFILE config.mk

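A sketch of what a session looks like, assuming the shipped defaults are accepted (the prompts are generated from the comments in config.mk.in, so the exact wording tracks that file):

    $ ./configure
    Configuring. Please answer the following questions:

    AWS_REGION: what region is all this stuff going in?
    (default: us-west-2)
    >
    ...
    All done! Putting your configuration into config.mk
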
70 terraform/Makefile Normal file
@@ -0,0 +1,70 @@

include ../config.mk

default: terraform

# I hate sed too and I am so sorry for what I'm about to do
terraform: terraform-check *.tf
	terraform init
	terraform apply
	terraform output | sed \
		-e 's/\(.*\) = /\U\1 = /' \
		-e 's/"//g' \
		-e '' \
		-e 'h;s/.* = //;s/\([.]\)/\\\1/g;x;s/ = .*//;G;s/\n/ = /' \
		-e 's/ \+$$//' \
		> terraform.mk

terraform-check: toolcheck pubkey
	$(eval AWS_INSTANCE_PUBLIC_KEY := $(shell sed -e 's/\//\\\//g' pubkey))


#CONFIGVARS = AWS_INSTANCE_PUBLIC_KEY AWS_SINGLE_INSTANCE_TYPE
SEDLINE =

config.tf: sedline config.tf.in ../config.mk
	sed $(SEDLINE) config.tf.in > config.tf

#configvars: ../config.mk
#	$(eval CONFIGVARS := $(shell grep '^[0-9A-Z]' ../config.mk | awk '{print $1}'))
#	echo $(CONFIGVARS)

#sedline: configvars $(addprefix __sed_,$(CONFIGVARS))
sedline: $(addprefix __sed_,$(shell grep '^[0-9A-Z_]' ../config.mk | awk '{print $$1}'))

__sed_%:
	$(eval SEDLINE := $$(SEDLINE) -e 's/{{$*}}/$($*)/')


CHECK_TOOLS = terraform aws

toolcheck:
	@echo
	@echo "Checking applications..."
	@ FAIL=""; \
	for TOOL in $(CHECK_TOOLS); do \
		which $${TOOL} >/dev/null || FAIL="$${FAIL} $${TOOL}"; \
	done; \
	if test -n "$${FAIL}"; then \
		echo "ERROR: You are missing the following:$${FAIL}"; \
		echo "Please make sure all necessary tools are installed and available in your path"; \
		echo; \
		exit 1; \
	fi

	@echo
	@echo "Checking AWS configuration..."
	aws iam get-user

pubkey:
	if test -n "$(AWS_INSTANCE_PUBLIC_KEY)"; then \
		echo "$(AWS_INSTANCE_PUBLIC_KEY)" > pubkey; \
	else \
		ssh-keygen -t rsa -N "" -f privkey && mv privkey.pub pubkey; \
	fi

# clean doesn't touch tfstate because we're not insane
clean:
	rm -f privkey pubkey
	rm -rf .terraform*

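The sed chain in the terraform target reshapes `terraform output` into make syntax: names are uppercased and dots in values are backslash-escaped so the values can pass safely through the later {{...}} sed substitutions. Roughly, with hypothetical outputs:

    $ terraform output
    instance_id = "i-0123456789abcdef0"
    public_ip = "203.0.113.10"
    $ cat terraform.mk
    INSTANCE_ID = i-0123456789abcdef0
    PUBLIC_IP = 203\.0\.113\.10
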
8 terraform/config.tf.in Normal file
@@ -0,0 +1,8 @@

locals {
  public_key    = "{{AWS_INSTANCE_PUBLIC_KEY}}"
  instance_type = "{{AWS_SINGLE_INSTANCE_TYPE}}"
  route53_zone  = "{{AWS_ROUTE53_ZONE}}"
  aws_region    = "{{AWS_REGION}}"
}

118 terraform/main.tf Normal file
@@ -0,0 +1,118 @@

# [X] aws provider
# [/] random pet
#       not needed w/o s3 bucket
# [/] s3 bucket
#     [/] use pet name!
#       n/a
# [X] vpc
# [/] tls private key
# [X] aws key pair
# [/] aws key local file
# [X] instance
# [ ] "myip"
# [X] sg
# [X] EIP
# [X] iam_instance_profile
#     [X] iam_role
#     [X] policydoc
#     [X] policy
#     [X] policy attachment
#     [X] iam policy data
# [ ] route53 records
# [/] adminpass for nextcloud
# [ ] outputs:
#     [ ] instance ID
#     [ ] public IP
#     [ ] name servers
#     [ ] bucket
#     [ ] myip


provider "aws" {
  region = local.aws_region
}

#resource "random_pet" "name" ()

module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "social-vpc"
  cidr = "10.42.0.0/16"

  azs             = [ "${local.aws_region}a" ]  # XXX probably a better way to pick AZs
  private_subnets = [ "10.42.0.0/20" ]
  public_subnets  = [ "10.42.16.0/20" ]

  enable_nat_gateway = false  # nat gateways cost money and who has any of that?
  enable_vpn_gateway = false
}

resource "aws_instance" "social" {
  ami                  = data.aws_ami.ubuntu.id
  instance_type        = local.instance_type
  subnet_id            = module.vpc.public_subnets.0
  key_name             = aws_key_pair.key.key_name
  iam_instance_profile = aws_iam_instance_profile.ssm.name

  vpc_security_group_ids = [ module.sg.security_group_id ]

  user_data = <<EOF
#!/bin/bash
set -e
sudo snap install amazon-ssm-agent --classic
sudo apt-get -y --no-install-recommends install ansible
EOF

  tags = { Name = "social" }
}

resource "aws_eip" "social" {
  domain   = "vpc"
  instance = aws_instance.social.id
}

module "sg" {
  source = "terraform-aws-modules/security-group/aws"

  name        = "social"
  description = "social SG"
  vpc_id      = module.vpc.vpc_id

  egress_rules = [ "all-all" ]

  ingress_with_cidr_blocks = [
#    {
#      rule        = "http-80-tcp"
#      cidr_blocks = "0.0.0.0/0"
#    },
#    {
#    },
#    {
#    }
  ]
}

resource "aws_key_pair" "key" {
  key_name   = "social"
  public_key = local.public_key
}

data "aws_ami" "ubuntu" {
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-arm64-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  owners = ["099720109477"]  # Canonical
}

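Worth noting: the default AWS_SINGLE_INSTANCE_TYPE of t4g.small is an arm64 (Graviton) type, which is why the AMI filter above selects ubuntu-jammy-22.04-arm64-server images; choosing an x86 instance type in config.mk would require a matching amd64 AMI filter.
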
15 terraform/outputs.tf Normal file
@@ -0,0 +1,15 @@

output "instance_id" {
  value = aws_instance.social.id
}
output "public_ip" {
  value = aws_instance.social.public_ip
}
output "nameservers" {
  value = length(module.zone) == 0 ? "" : module.zone.0.route53_zone_name_servers
}
#output "bucket" {
#}
#output "myip" {
#}

33 terraform/route53.tf Normal file
@@ -0,0 +1,33 @@

module "zone" {
  count = local.route53_zone == "" ? 0 : 1

  source  = "terraform-aws-modules/route53/aws//modules/zones"
  version = "~> 2.0"

  zones = {
    "${local.route53_zone}" = { comment = "${local.route53_zone}" }
  }
}

module "records" {
  count = local.route53_zone == "" ? 0 : 1

  source    = "terraform-aws-modules/route53/aws//modules/records"
  version   = "~> 2.0"
  zone_name = keys(module.zone.0.route53_zone_zone_id)[0]

  records = [
    {
      name    = ""
      type    = "A"
      ttl     = 600  # 10 minutes
      records = [ aws_instance.social.public_ip ]
    },
  ]

  depends_on = [module.zone]
}

44 terraform/ssm.tf Normal file
@@ -0,0 +1,44 @@

# SSM permissions

resource "aws_iam_role" "ssm" {
  name               = "social_ssm"
  assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json
  path               = "/"
  description        = "SSM permissions for social server"
}

data "aws_iam_policy_document" "assume_role_policy" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}

resource "aws_iam_policy" "ssm" {
  name        = "social_ssm"
  policy      = data.aws_iam_policy.ssm.policy
  path        = "/"
  description = "SSM permissions for social"
}

data "aws_iam_policy" "ssm" {
  arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
}

resource "aws_iam_role_policy_attachment" "ssm" {
  role       = aws_iam_role.ssm.name
  policy_arn = aws_iam_policy.ssm.arn
}

resource "aws_iam_instance_profile" "ssm" {
  name = "social_ssm"
  role = aws_iam_role.ssm.name
  path = "/"
}

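This instance profile is what lets the ProxyCommand in ansible/Makefile tunnel SSH over SSM without any inbound ports open in the security group; the same channel can be opened by hand (instance ID illustrative):

    aws --region us-west-2 ssm start-session --target i-0123456789abcdef0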