-
Notifications
You must be signed in to change notification settings - Fork 1
/
EmApiClass.py
219 lines (192 loc) · 8.99 KB
/
EmApiClass.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
from typing import Any, List
import requests
from requests.auth import HTTPBasicAuth
import datetime
import urllib3
import re
from rich.progress import track
from typing import Dict, List, Optional
from capacity_sorter import capacity_sorter
urllib3.disable_warnings()
class EmClass:
    """
    Enterprise Manager API Class.

    Initial creation of the class sets the port and basic API headers.

    Methods:
        login - logs into the API and sets the access token
        get_bu_servers - gets a list of the backup servers associated with the Enterprise Manager
        get_jobs - gets a list of all the jobs associated with the Veeam server
        get_vm_jobs - gets the vms in the jobs above
        get_buf_ids - gets the backup file ids
        get_backup_files - gets the backup files - takes an integer for the quantity of days to analyse
        filter_jobs - filters the backup files by the active jobs
        add_vm_details - adds VM details to the backup_details object
        add_repo_details - adds repo details to the backup_details object
        get_repos - gets repo information - name & capacity
    """
    def __init__(self) -> None:
        # 9398 is the default Veeam Enterprise Manager REST API port.
        self.port = 9398
        self.headers: Dict[Optional[str], Optional[str]] = {"Accept": "application/json"}

    def _get_data(self, url: str) -> Any:
        """GET *url* with the session headers and return the decoded JSON body.

        NOTE(review): no ``timeout`` is passed, so a hung server blocks
        indefinitely; TLS verification is disabled (self-signed EM certs
        presumably) - confirm before hardening.
        """
        data = requests.get(url, headers=self.headers, verify=False)
        return data.json()

    def set_threads(self, threads: int) -> None:
        """Store the thread count for later use by callers of this class."""
        self.threads = threads

    def login(self, address: str, username: str, password: str) -> int:
        """Log in to the Enterprise Manager API.

        Saves the session token into ``self.headers`` for all subsequent
        requests and returns the HTTP status code of the login call.
        """
        self.__address = address
        self.__username = username
        self.__password = password
        self.login_url = f"https://{self.__address}:{self.port}/api/sessionMngr/?v=v1_6"
        self.base_url = f"https://{self.__address}:{self.port}/api"
        auth = HTTPBasicAuth(self.__username, self.__password)
        res = requests.post(self.login_url, auth=auth, verify=False)
        # The session token is returned in a response header, not the body.
        self.token = res.headers.get('X-RestSvcSessionId')
        self.headers['X-RestSvcSessionId'] = self.token
        return res.status_code

    def get_address(self) -> str:
        """Return the EM address set by login() or set_host()."""
        return self.__address

    def set_headers(self, token: str) -> None:
        """Add an existing session token to the request headers."""
        self.headers['X-RestSvcSessionId'] = token

    def set_host(self, host: str) -> None:
        """Point the client at *host* without performing a new login."""
        self.__address = host
        self.base_url = f"https://{self.__address}:{self.port}/api"

    def get_bu_servers(self) -> None:
        """Fetch the backup servers attached to the EM and cache the JSON."""
        bu_url = self.base_url + "/backupServers"
        self.bus_json: Dict[str, Any] = self._get_data(bu_url)

    def set_name_id(self, index: int) -> None:
        """Select the backup server at *index* of the cached server list."""
        self.bus_name = self.bus_json['Refs'][index]['Name']
        # UID looks like "urn:veeam:BackupServer:<guid>"; keep only the guid.
        self.bu_id = self.bus_json['Refs'][index]['UID'].split(":")[-1]

    # Gets the jobs filtered by schedule enabled
    # Need to try this with format entity added
    def get_jobs(self, filtered: bool = True) -> None:
        """Fetch the backup jobs for the selected backup server.

        When *filtered* is True, only jobs with an enabled schedule are
        requested. Populates ``job_json``, ``job_names`` and ``job_ids``.
        """
        if filtered:
            job_url = f"{self.base_url}/query?type=Job&filter=ScheduleEnabled==True&JobType==Backup&BackupServerUid=={self.bu_id}"
        else:
            job_url = f"{self.base_url}/query?type=Job&filter=JobType==Backup&BackupServerUid=={self.bu_id}"
        self.job_json = self._get_data(job_url)
        refs = self.job_json['Refs']['Refs']
        self.job_names = [x['Name'] for x in refs]
        self.job_ids: List[Dict[str, str]] = [
            {"name": ref['Name'], "id": ref['UID']} for ref in refs
        ]

    # v11 has /api/v1/jobs but not everyone has it
    def get_vm_jobs(self) -> None:
        """Fetch the VM names included in each job in ``job_ids``.

        Populates ``vms_per_job`` with one dict per job:
        {"name": ..., "vms": [...], "length": <vm count>}.
        """
        self.vms_per_job = []
        for i in track(self.job_ids, description="Gettings Job Data"):
            # 'https://192.168.0.25:9398/api/jobs/urn:veeam:Job:02e7be62-5a36-4e34-b2ca-caff08553c3e/includes'
            job_id = i['id'].split(":")[-1]
            cat_vms_url = f"{self.base_url}/jobs/{job_id}/includes"
            cat_vms_json = self._get_data(cat_vms_url)
            vm_names: List[str] = [k['Name'] for k in cat_vms_json['ObjectInJobs']]
            self.vms_per_job.append({
                "name": i['name'],
                "vms": vm_names,
                "length": len(vm_names)
            })

    def get_backup_files(self, day_qty: int) -> None:
        """Fetch the backup files created within the last *day_qty* days
        on the selected backup server."""
        # Timezone-aware replacement for the deprecated datetime.utcnow();
        # the strftime output (with the literal 'Z' suffix) is unchanged.
        utc_now = datetime.datetime.now(datetime.timezone.utc)
        old_date = utc_now - datetime.timedelta(days=day_qty)
        old_date_z = old_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        backup_url = f'{self.base_url}/query?type=BackupFile&format=Entities&filter=CreationTimeUTC>="{old_date_z}"&BackupServerUid=={self.bu_id}'
        self.backup_json = self._get_data(backup_url)
        self.backup_details = self.backup_json['Entities']['BackupFiles']['BackupFiles']

    def __sort_buf_ids(self):
        """Convert backup file ids into their entity URLs ready for processing.

        NOTE(review): reads ``self.ids``, which no method in this file sets -
        verify the caller populates it before this is invoked.
        """
        self.bu_urls: List[str] = []
        for i in self.ids:
            url = f"{self.base_url}/backupFiles/{i}?format=Entity"
            self.bu_urls.append(url)

    def run_filter_jobs(self) -> None:
        """Keep only the backup files whose job is in ``job_names``.

        Populates ``filtered_jobs``; BackupSize/DataSize are converted
        from bytes to GB.
        """
        # Set membership replaces the original O(files x jobs) nested scan.
        active_jobs = set(self.job_names)
        self.filtered_jobs: List[Dict[str, Any]] = []
        for entry in self.backup_details:
            # Link name is "<job>\<vm>" or "<job> - <suffix>"; keep the job part.
            job_name = re.split(r'\\| - ', entry['Links'][0]['Name'])[0]
            if job_name not in active_jobs:
                continue
            self.filtered_jobs.append({
                "creationTime": entry['CreationTimeUtc'],
                "name": job_name,
                "fileType": entry['FileType'],
                "fileName": entry['Name'],
                "backupFile": entry['FilePath'],
                "DeduplicationRatio": entry['DeduplicationRatio'],
                "CompressRatio": entry['CompressRatio'],
                "BackupSize": entry['BackupSize'] / 1024**3,
                "DataSize": entry['DataSize'] / 1024**3
            })

    def run_capacity_sorter(self) -> None:
        """Run capacity_sorter over the grouped jobs, then attach each job's
        VM quantity and VM list from ``vms_per_job``."""
        self.sorted_cap = capacity_sorter(self.jobs_grouped)
        for i in self.sorted_cap:
            for j in self.vms_per_job:
                if i['jobName'] == j['name']:
                    i['vmQty'] = j['length']
                    i['vmsInJob'] = j['vms']

    def add_vm_details(self) -> None:
        """Group the filtered backup files by job name into ``jobs_grouped``."""
        self.jobs_grouped = []
        for i in track(self.job_names, description="Sorting the backups"):
            temp_data = []
            for j in self.filtered_jobs:
                if i == j['name']:
                    temp_data.append(j)
            self.jobs_grouped.append({
                "jobName": i,
                "backups": temp_data
            })

    def add_repo_details(self) -> None:
        """Fetch each backup's entity and attach its repository name to the
        matching job in ``sorted_cap``."""
        backup_url = self.base_url + "/backups"
        backup_json = self._get_data(backup_url)
        self.bu_uuid = [x['UID'] for x in backup_json['Refs']]
        for i in track(self.bu_uuid, description="Add Repo Details"):
            backup_id = i.split(":")[-1]
            bu_url = self.base_url + f"/backups/{backup_id}?format=Entity"
            res_json = self._get_data(bu_url)
            self.backup_details.append(res_json)
        for i in self.sorted_cap:
            for j in self.backup_details:
                if i['jobName'] == j['Name']:
                    # First link on the backup entity is its repository.
                    i['repository'] = j['Links'][0]['Name']

    def add_v11_details(self, repo_info, job_info):
        """Merge v11-only repo task limits and job proxy info into
        ``sorted_cap``; the v11 API class needs to be run separately."""
        for i in repo_info:
            for j in self.sorted_cap:
                if i['name'] == j['repository']:
                    j['repoMaxTasks'] = i['maxTaskCount']
                    j['repoPerVM'] = i['perVmBackup']
        # loops through the v11 job object and adds the proxy info
        for i in job_info['data']:
            for j in self.sorted_cap:
                if i['name'] == j['jobName']:
                    j['backupProxies'] = i['storage']['backupProxies']

    def get_repos(self) -> None:
        """Fetch repository name/capacity information into ``repo_info``;
        runs standalone. Sizes are converted from bytes to GB."""
        repo_url = self.base_url + "/query?type=Repository&format=Entities"
        repo_json = self._get_data(repo_url)
        self.repo_info: List[Any] = []
        for i in repo_json['Entities']['Repositories']['Repositories']:
            cap = round(i['Capacity'] / 1024**3, 4)
            free = round(i['FreeSpace'] / 1024**3, 4)
            used = round(cap - free, 4)
            data = {
                "name": i['Name'],
                "CapacityGB": cap,
                "FreeSpaceGB": free,
                "UsedSpaceGB": used
            }
            self.repo_info.append(data)