Introduction
Option 1: build the system template separately; Terraform is only responsible for cloning multiple systems from it
Option 2: Terraform both builds the system template and clones multiple systems from it
Option 3: Packer builds the system template, Terraform handles the cloning
System template types
- Thin template: use the vanilla cloudimg as-is and rely on cloud-init to do all initialization at first boot;
- Thick template: bake the required software and base configuration into the image first, then package and reuse it; cloud-init only handles the IP address, hostname, and user password;
Here we go with Option 1 plus a thick template.
Prepare the system template
Steps in brief
- Download the Ubuntu cloudimg from the official site to the internal network ahead of time, to cut down on build time;
- Use virt-customize to modify the configuration inside the image and install software (see the prerequisite note after this list);
- Use qm to create an empty VM and import the cloudimg disk file;
- Tune the new VM's device details and hardware settings;
- Convert the VM into a template.
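virt-customize is provided by libguestfs-tools; assuming the image is built on a Debian/Ubuntu host (or directly on the PVE node), installing the prerequisite looks roughly like this:

# Prerequisite for virt-customize; the package name assumes Debian/Ubuntu or PVE
apt-get update
apt-get install -y libguestfs-tools
# Sanity check
virt-customize --version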
One-click script
#!/bin/bash
TEMPLATE_NAME="template-ubuntu-2404-v1"
IMAGE_FILE_NAME="ubuntu-24.04-thick.qcow2"
SOURCE_URL="http://files.services.wait/soft/noble-server-cloudimg-amd64.img"
# 1. Download the original image
if [ ! -f "original.img" ]; then
wget $SOURCE_URL -O original.img
fi
# 2. Customize a copy so the original image stays pristine
cp original.img $IMAGE_FILE_NAME
# 3. Core customization: inject software and configuration
# --install: install the required agent and tools
# --run-command: switch the apt mirrors, enable the agent, reset cloud-init
# --firstboot-command: would run tasks on first boot (not used in this script)
virt-customize -a $IMAGE_FILE_NAME \
--install qemu-guest-agent,net-tools,vim,curl \
--run-command "sed -i 's|http://archive.ubuntu.com/ubuntu|https://mirrors.tuna.tsinghua.edu.cn/ubuntu|g' /etc/apt/sources.list.d/ubuntu.sources" \
--run-command "sed -i 's|http://security.ubuntu.com/ubuntu|https://mirrors.tuna.tsinghua.edu.cn/ubuntu|g' /etc/apt/sources.list.d/ubuntu.sources" \
--run-command "systemctl enable qemu-guest-agent" \
--run-command "cloud-init clean --logs"
echo "厚镜像制作完成: $IMAGE_FILE_NAME"
# 推送 pve
VM_ID=9000
STORAGE="local-lvm"
qm create $VM_ID --name ${TEMPLATE_NAME} \
--cpu host \
--sockets 1 \
--cores 2 \
--memory 1024 \
--machine q35 \
--bios ovmf \
--net0 virtio,bridge=vmbr0
qm importdisk $VM_ID ${IMAGE_FILE_NAME} $STORAGE
qm set $VM_ID \
--efidisk0 $STORAGE:4,format=raw,pre-enrolled-keys=1 \
--scsihw virtio-scsi-single \
--scsi0 $STORAGE:vm-$VM_ID-disk-0,iothread=1,aio=io_uring \
--boot order=scsi0 \
--serial0 socket \
--ostype l26 \
--vga serial0 \
--keyboard en-us \
--ide2 $STORAGE:cloudinit \
--agent 1,type=virtio
qm template $VM_ID
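After the script finishes, a quick sanity check on the PVE node (a sketch; it assumes VM ID 9000 as used above) confirms the template exists and is wired up as expected:

# Confirm the template is registered and inspect its configuration
qm list | grep 9000
qm config 9000
# Optionally do a manual test clone before involving Terraform
# qm clone 9000 999 --name clone-test --full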
Cloning multiple hosts with Terraform
Overview
- It is best to define a generic, reusable VM Terraform module (an assumed directory layout is sketched below);
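The examples that follow assume a layout roughly like the one below; the file names are conventional, not required by Terraform:

# A sketch of the assumed project layout
mkdir -p modules/proxmox_vm
touch providers.tf variables.tf terraform.tfvars main.tf          # root module
touch modules/proxmox_vm/main.tf modules/proxmox_vm/variables.tf  # reusable VM module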
VM Terraform module
modules/proxmox_vm
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.100.0"
}
}
}
# Inside a child module the resource is conventionally named "this",
# since it is the only local instance; the parent tells copies apart via module.xxx
resource "proxmox_virtual_environment_vm" "this" {
name = var.vm_name
node_name = var.target_node
vm_id = var.vm_id
bios = "ovmf"
machine = "q35"
tags = var.vm_tags
clone {
vm_id = var.template_id
# full = true # recommended for production
}
started = true
memory {
dedicated = var.memory_size
}
# System disk
disk {
datastore_id = var.datastore_id
interface = "scsi0"
size = var.system_disk_size
}
# Static data disk example (commented out; replaced by the dynamic block below)
# disk {
# datastore_id = var.datastore_id
# interface = "scsi1"
# size = var.data_disk_size
# file_format = "raw"
# iothread = true
# }
# Dynamic data disks (generated on demand)
dynamic "disk" {
for_each = var.additional_disks
content {
# disk.value refers to the object of the current iteration
datastore_id = disk.value.datastore_id != null ? disk.value.datastore_id : var.datastore_id
interface = disk.value.interface
size = disk.value.size
file_format = "raw"
iothread = disk.value.iothread
}
}
initialization {
datastore_id = var.datastore_id
interface = "ide2"
ip_config {
ipv4 {
address = var.ipv4_address
gateway = var.ipv4_gateway
}
}
dns {
servers = ["192.168.5.9", "8.8.8.8"]
}
user_account {
username = "ubuntu"
# password = "passw0rd"
keys = [
var.ssh_public_key_ubuntu
]
}
}
lifecycle {
ignore_changes = [
network_device[0].mac_address,
initialization[0].user_account[0].keys,
]
}
}
output "vm_id" {
description = "创建的虚拟机 ID"
value = proxmox_virtual_environment_vm.this.vm_id
}
output "vm_name" {
value = proxmox_virtual_environment_vm.this.name
}
output "ipv4_address" {
description = "虚拟机的 IPv4 地址"
# 注意: 这里取决于 Provider 返回值, 通常从 initialization 或 network_interface 获取
value = proxmox_virtual_environment_vm.this.initialization[0].ip_config[0].ipv4[0].address
}
variable.tf
variable "vm_name" {
description = "虚拟机名称"
type = string
}
variable "vm_id" {
description = "虚拟机 ID"
type = number
}
variable "target_node" {
description = "目标 PVE 节点"
type = string
}
variable "template_id" {
description = "源模板的 ID"
type = string
}
variable "ipv4_address" {
description = "IPv4 地址 (CIDR 格式)"
type = string
}
variable "ipv4_gateway" {
description = "网关地址"
type = string
}
variable "memory_size" {
description = "内存大小"
type = number
default = 1024
}
variable "system_disk_size" {
type = number
default = 30
}
variable "data_disk_size" {
type = number
default = 20
}
variable "datastore_id" {
default = "local-lvm"
}
variable "vm_tags" {
type = list(string)
description = "虚拟机标签列表"
default = ["terraform", "ubuntu"]
}
variable "additional_disks" {
description = "额外的磁盘列表配置"
type = list(object({
interface = string
size = number
datastore_id = optional(string) # optional; falls back to the module-wide datastore_id
iothread = optional(bool, true)
}))
default = [] # empty by default, i.e. no extra data disks
}
variable "ssh_public_key_ubuntu" {
description = "Ubuntu 用户 SSH 公钥内容"
type = string
}
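Before wiring the module into a root configuration, it can be checked on its own; a minimal sketch, run from the repository root:

# Validate the reusable module in isolation
cd modules/proxmox_vm
terraform init -backend=false
terraform validate
cd -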
Create multiple hosts in the root module
main.tf
# Load the SSH public key
data "local_file" "ubuntu_key" {
filename = "/home/user1/keys/id_ed25519.pub"
}
# Use a data source to look up the existing template in Proxmox, i.e. the VM 9000 created above
data "proxmox_virtual_environment_vms" "ubuntu_template_v1" {
node_name = var.default_proxmox_node
filter {
name = "name"
values = ["template-ubuntu-2404-v1"]
}
}
module "vm_k8s_nodes" {
# Reference the module defined above
source = "./modules/proxmox_vm"
for_each = var.servers
# Pass in the concrete parameters
vm_name = each.value.name
vm_id = each.value.id
target_node = var.default_proxmox_node
template_id = data.proxmox_virtual_environment_vms.ubuntu_template_v1.vms[0].vm_id
# template_id = 9000
ssh_public_key_ubuntu = data.local_file.ubuntu_key.content
memory_size = 1024
system_disk_size = 30
ipv4_address = "${each.value.ip}"
ipv4_gateway = "192.168.5.254"
vm_tags = ["terraform", "ubuntu", "k8s"]
}
# Automatically generate the Ansible inventory file
resource "local_file" "tf_ansible_inventory_file" {
content = <<-EOF
[k8s]
%{for vm in var.servers~}
${vm.name} ansible_host=${split("/", vm.ip)[0]}
%{endfor~}
EOF
filename = "${path.module}/../../../ansible/inventory.ini"
file_permission = "0644"
}
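Given the example terraform.tfvars further below, the rendered inventory should come out roughly as follows (the path is relative to the root module, as in the local_file resource):

# Inspect the generated inventory after an apply
cat ../../../ansible/inventory.ini
# Expected content for the sample servers map:
# [k8s]
# k8s-node1 ansible_host=192.168.5.24
# k8s-node2 ansible_host=192.168.5.27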
Note
I use the pg backend and Vault here for Terraform state and credentials; neither is required. The part that matters is the proxmox provider configuration.
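If you keep the pg backend and the AppRole login, the connection string and the AppRole credentials have to come from somewhere; one option (a sketch, all values are placeholders) is to pass them through environment variables:

# Connection string for the pg backend (read when conn_str is not set in the config)
export PG_CONN_STR="postgres://terraform:secret@192.168.5.20/terraform_backend?sslmode=disable"
# AppRole credentials consumed by var.vault_role_id / var.vault_secret_id
export TF_VAR_vault_role_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
export TF_VAR_vault_secret_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"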
providers.tf
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "~>0.100.0"
}
vault = {
source = "hashicorp/vault"
version = "5.8.0"
}
}
backend "pg" {
schema_name = "proxmox"
}
}
provider "vault" {
auth_login {
path = "auth/approle/login"
parameters = {
role_id = var.vault_role_id
secret_id = var.vault_secret_id
}
}
}
ephemeral "vault_kv_secret_v2" "pve_creds" {
mount = "project1"
name = "infra/proxmox"
}
provider "proxmox" {
# endpoint = var.proxmox_endpoint
endpoint = "${ephemeral.vault_kv_secret_v2.pve_creds.data["api_address"]}"
# api_token = var.proxmox_api_token
api_token = "${ephemeral.vault_kv_secret_v2.pve_creds.data["api_id"]}=${ephemeral.vault_kv_secret_v2.pve_creds.data["api_secret"]}"
# Whether to accept a self-signed certificate
insecure = true
# Some operations require SSH access to the PVE host; optional SSH connection settings
ssh {
agent = true
username = "root"
private_key = file("/home/user1/keys/id_ed25519")
node {
name = "pve"
address = "192.168.5.9"
}
node {
name = "pve1"
address = "192.168.5.10"
port = 22
}
}
}
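The ephemeral block above expects a KV v2 secret at project1/infra/proxmox holding the API address and an API token split into its ID and secret parts; seeding it could look like this (a sketch, all values are placeholders):

# Seed the secret read by ephemeral.vault_kv_secret_v2.pve_creds
vault kv put -mount=project1 infra/proxmox \
api_address="https://192.168.5.9:8006/" \
api_id="terraform@pve!provisioning" \
api_secret="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"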
variables.tf
variable "proxmox_endpoint" {
type = string
description = "IP of Proxmox server (mandatory)"
default = ""
}
variable "proxmox_api_token" {
type = string
sensitive = true
default = ""
}
variable "default_proxmox_node" {
type = string
default = ""
description = "default proxmox node"
}
variable "default_datastore_node10" {
type = string
description = "node10 store disk id"
default = "local-lvm"
}
variable "vault_role_id" {
type = string
sensitive = true
}
variable "vault_secret_id" {
type = string
sensitive = true
}
variable "servers" {
description = "K8s nodes configuration"
type = map(object({
id = number
name = string
ip = string
}))
default = {}
}
terraform.tfvars
default_proxmox_node = "pve"
servers = {
node1 = { id = 301, name = "k8s-node1", ip = "192.168.5.24/24" }
node2 = { id = 302, name = "k8s-node2", ip = "192.168.5.27/24" }
}
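With all of the above in place, the usual workflow applies; the Ansible ping at the end is a quick way to confirm the generated inventory and SSH access (it assumes ansible is installed and uses the key pair referenced in providers.tf):

terraform init
terraform plan
terraform apply
# Once the VMs are up, verify the generated inventory end to end
ansible -i ../../../ansible/inventory.ini k8s -m ping -u ubuntu --private-key /home/user1/keys/id_ed25519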