rolling_update.yml
---
# This playbook does a rolling update of all the Ceph services.
# Change the value of 'serial:' to adjust the number of servers updated at once.
#
# The roles that apply to the Ceph hosts will be applied: ceph-common,
# ceph-mon, ceph-osd, ceph-mds and ceph-rgw. So any changes to configuration,
# package updates, etc, will be applied as part of the rolling update process.
#
# /!\ DO NOT FORGET TO CHANGE THE RELEASE VERSION FIRST! /!\
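#
# The confirmation prompt below can be pre-answered from the command line.
# A minimal example invocation (the inventory path 'hosts' is an assumption,
# not something this playbook defines):
#
#   ansible-playbook rolling_update.yml -i hosts -e ireallymeanit=yes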

- name: confirm whether user really meant to upgrade the cluster
  hosts: localhost
  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to upgrade the cluster?
      default: 'no'
      private: no
  tasks:
    - name: exit playbook, if user did not mean to upgrade cluster
      fail:
        msg: >
          "Exiting rolling_update.yml playbook, cluster was NOT upgraded.
          To upgrade the cluster, either say 'yes' at the prompt or
          use `-e ireallymeanit=yes` on the command line when
          invoking the playbook"
      when: ireallymeanit != 'yes'

- hosts:
    - mons
    - osds
    - mdss
    - rgws
  become: True
  tasks:
    - debug: msg="gather facts on all Ceph hosts for later reference"
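    # The next three checks record which init system manages the Ceph
    # daemons; the registered results (is_sysvinit, is_upstart, is_systemd)
    # persist as per-host facts, so the restart tasks in the plays below can
    # pick the matching 'service' call.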
    - name: check if sysvinit
      stat:
        path: /etc/rc?.d/S??ceph
        follow: yes
      register: is_sysvinit
    - name: check if upstart
      stat:
        path: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
      register: is_upstart
    - name: check if systemd
      command: grep -sq systemd /proc/1/comm
      register: is_systemd
      # grep exits non-zero when PID 1 is not systemd; that is an answer,
      # not an error, so don't fail the play or report a change
      failed_when: false
      changed_when: false

- hosts: mons
  serial: 1
  become: True
  vars:
    upgrade_ceph_packages: True
    mon_group_name: mons
  pre_tasks:
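    # Compacting keeps the monitor store small before the daemon is
    # restarted, so the mon comes back up as quickly as possible.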
    - name: compress the store as much as possible
      command: ceph tell mon.{{ ansible_hostname }} compact
  roles:
    - ceph-common
    - ceph-mon
  post_tasks:
    - name: restart ceph mons with upstart
      service:
        name: ceph-mon
        state: restarted
        args: id={{ ansible_hostname }}
      when: is_upstart.stat.exists
    - name: restart ceph mons with sysvinit
      service:
        name: ceph
        state: restarted
      when: is_sysvinit.stat.exists
    - name: restart ceph mons with systemd
      service:
        name: ceph-mon@{{ ansible_hostname }}
        state: restarted
        enabled: yes
      when: is_systemd.rc == 0
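    # Pick any monitor other than the one just upgraded; we will ask it
    # whether this host has rejoined the quorum.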
    - name: select a running monitor
      set_fact:
        mon_host: "{{ item }}"
      with_items: "{{ groups.mons }}"
      when: item != inventory_hostname
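    # The upgraded mon is healthy only once it appears in the quorum list
    # printed by 'ceph -s'; poll the surviving monitor for up to ~50 seconds.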
    - name: waiting for the monitor to join the quorum...
      shell: |
        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -sq {{ ansible_hostname }}
      register: result
      until: result.rc == 0
      retries: 5
      delay: 10
      delegate_to: "{{ mon_host }}"

- hosts: osds
  serial: 1
  become: True
  vars:
    upgrade_ceph_packages: True
    osd_group_name: osds
  pre_tasks:
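    # 'noout' stops CRUSH from marking restarting OSDs out and rebalancing
    # data; 'noscrub' and 'nodeep-scrub' keep scrub I/O from competing with
    # recovery while daemons restart.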
    - name: set osd flags
      command: ceph osd set {{ item }}
      with_items:
        - noout
        - noscrub
        - nodeep-scrub
      delegate_to: "{{ groups.mons[0] }}"
  roles:
    - ceph-common
    - ceph-osd
  post_tasks:
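    # OSD data directories are named <cluster>-<id> under /var/lib/ceph/osd,
    # so cutting on '-' yields the numeric ids that key the systemd units.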
    - name: get osd numbers
      shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | cut -d '-' -f 2 ; fi"
      register: osd_ids
      changed_when: false
    - name: restart ceph osds (upstart)
      service:
        name: ceph-osd-all
        state: restarted
      when: is_upstart.stat.exists
    - name: restart ceph osds (sysvinit)
      service:
        name: ceph
        state: restarted
      when: is_sysvinit.stat.exists
    - name: restart ceph osds (systemd)
      service:
        name: ceph-osd@{{ item }}
        state: restarted
        enabled: yes
      with_items: "{{ osd_ids.stdout_lines }}"
      when: is_systemd.rc == 0
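    # The first sed pipeline extracts the active+clean PG count from
    # 'ceph pg stat', the second the total PG count; only move on once the
    # two match and overall health is HEALTH_OK or HEALTH_WARN.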
    - name: waiting for clean pgs...
      shell: |
        test "$(ceph pg stat | sed 's/^.*pgs://;s/active+clean.*//;s/ //')" -eq "$(ceph pg stat | sed 's/pgs.*//;s/^.*://;s/ //')" && ceph health | egrep -sq "HEALTH_OK|HEALTH_WARN"
      register: result
      until: result.rc == 0
      retries: 10
      delay: 10
      delegate_to: "{{ groups.mons[0] }}"
    - name: unset osd flags
      command: ceph osd unset {{ item }}
      with_items:
        - noout
        - noscrub
        - nodeep-scrub
      delegate_to: "{{ groups.mons[0] }}"

- hosts: mdss
  serial: 1
  become: True
  vars:
    upgrade_ceph_packages: True
    mds_group_name: mdss
  roles:
    - ceph-common
    - ceph-mds
  post_tasks:
    - name: restart ceph mdss with upstart
      service:
        name: ceph-mds
        state: restarted
        args: id={{ ansible_hostname }}
      when: is_upstart.stat.exists
    - name: restart ceph mdss with sysvinit
      service:
        name: ceph
        state: restarted
        args: mds
      when: is_sysvinit.stat.exists
    - name: restart ceph mdss with systemd
      service:
        name: ceph-mds@{{ ansible_hostname }}
        state: restarted
        enabled: yes
      when: is_systemd.rc == 0

- hosts: rgws
  serial: 1
  become: True
  vars:
    upgrade_ceph_packages: True
    rgw_group_name: rgws
  roles:
    - ceph-common
    - ceph-rgw
  post_tasks:
    - name: restart ceph rgws with systemd
      service:
        name: ceph-radosgw@rgw.{{ ansible_hostname }}
        state: restarted
        enabled: yes
      when: is_systemd.rc == 0
    # the sysvinit script is named 'radosgw' on Debian-family hosts and
    # 'ceph-radosgw' on RedHat-family hosts, hence the two tasks below
    - name: restart ceph rgws with sysvinit
      service:
        name: radosgw
        state: restarted
      when: ansible_os_family != 'RedHat'
    - name: restart rados gateway server(s)
      service:
        name: ceph-radosgw
        state: restarted
      when: ansible_os_family == 'RedHat'