# Copyright 2011 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14"""Defines various sub-classes of the `StressTestCase` and
15`PendingAction` class. The sub-classes of StressTestCase implement various
16API calls on the Nova cluster having to do with Server Actions. Each
17sub-class will have a corresponding PendingAction. These pending
18actions veriy that the API call was successful or not."""


# system imports
import random
import time

# local imports
import test_case
import pending_action
from tempest.exceptions import TimeoutException, Duplicate
from utils.util import *
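
# A minimal sketch (hypothetical driver code, not part of this module) of
# how the stress framework is assumed to use these classes: run() fires the
# API call and returns a PendingAction, whose retry() is polled until it
# reports True or raises TimeoutException:
#
#     test = TestRebootVM()
#     pending = test.run(manager, state, type='SOFT')
#     while pending is not None and not pending.retry():
#         time.sleep(1)   # poll until the reboot is verified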


class TestRebootVM(test_case.StressTestCase):
    """Reboot a server"""

    def run(self, manager, state, *pargs, **kwargs):
        """
        Send an HTTP POST request to the nova cluster to reboot a random
        server. Update the state of the object in the `state` variable to
        indicate that it is rebooting.
        `manager` : Manager object
        `state` : `State` object describing our view of the cluster's state
        `pargs` : positional arguments
        `kwargs` : keyword arguments, which include:
                   `timeout` : how long to wait before raising an Exception
                   `type` : reboot type [SOFT or HARD] (default is SOFT)
        """

        vms = state.get_instances()
        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
        # no active vms, so return None
        if not active_vms:
            self._logger.info('no ACTIVE instances to reboot')
            return

        _reboot_arg = kwargs.get('type', 'SOFT')

        # select an active vm to reboot and then send the request to the
        # nova controller
        target = random.choice(active_vms)
        reboot_target = target[0]
        # Requesting a reboot while one is already in progress is an error,
        # so treat a Duplicate response as a no-op.
        try:
            response, body = manager.servers_client.reboot(
                reboot_target['id'],
                _reboot_arg)
        except Duplicate:
            return

        if (response.status != 202):
            self._logger.error("response: %s" % response)
            raise Exception

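        # Nova reports a soft reboot with status REBOOT and a hard reboot
        # with HARD_REBOOT, so wait for the matching transient state below.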
        if _reboot_arg == 'SOFT':
            reboot_state = 'REBOOT'
        else:
            reboot_state = 'HARD_REBOOT'

        self._logger.info('waiting for machine %s to change to %s' %
                          (reboot_target['id'], reboot_state))

        return VerifyRebootVM(manager,
                              state,
                              reboot_target,
                              reboot_state=reboot_state)


class VerifyRebootVM(pending_action.PendingAction):
    """Class to verify that the reboot completed."""
    States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')

    def __init__(self, manager, state, target_server,
                 reboot_state=None,
                 ip_addr=None):
        super(VerifyRebootVM, self).__init__(manager,
                                             state,
                                             target_server)
        self._reboot_state = reboot_state
        self._retry_state = self.States.REBOOT_CHECK

    def retry(self):
        """
        Check that the server of interest has actually rebooted. Update
        state to indicate that the server is running again.
        """
        # don't run reboot verification if the target machine has been
        # deleted or is going to be deleted
        if (self._target['id'] not in self._state.get_instances().keys() or
            self._state.get_instances()[self._target['id']][1] ==
                'TERMINATING'):
            self._logger.debug('machine %s is deleted or TERMINATING' %
                               self._target['id'])
            return True

        if time.time() - self._start_time > self._timeout:
            raise TimeoutException
        reboot_state = self._reboot_state
        if self._retry_state == self.States.REBOOT_CHECK:
            server_state = self._check_for_status(reboot_state)
            if server_state == reboot_state:
                self._logger.info('machine %s ACTIVE -> %s' %
                                  (self._target['id'], reboot_state))
                self._state.set_instance_state(self._target['id'],
                                               (self._target, reboot_state))
                self._retry_state = self.States.ACTIVE_CHECK
            elif server_state == 'ACTIVE':
                # machine must have gone ACTIVE -> REBOOT -> ACTIVE already
                self._retry_state = self.States.ACTIVE_CHECK

        elif self._retry_state == self.States.ACTIVE_CHECK:
            if not self._check_for_status('ACTIVE'):
                return False
            target = self._target
            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
                              (target['id'], reboot_state,
                               time.time() - self._start_time))
            self._state.set_instance_state(target['id'],
                                           (target, 'ACTIVE'))

        return True

# This code needs to be tested against a cluster that supports resize.
# If re-enabled, it will also need `import json` at the top of this file.
#class TestResizeVM(test_case.StressTestCase):
#    """Resize a server (change flavors)"""
#
#    def run(self, manager, state, *pargs, **kwargs):
#        """
#        Send an HTTP POST request to the nova cluster to resize a random
#        server. Update `state` to indicate the server is resizing.
#
#        `manager` : Manager object
#        `state` : `State` object describing our view of the cluster's state
#        `pargs` : positional arguments
#        `kwargs` : keyword arguments, which include:
#                   `timeout` : how long to wait before raising an Exception
#        """
#
#        vms = state.get_instances()
#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
#        # no active vms, so return None
#        if not active_vms:
#            self._logger.debug('no ACTIVE instances to resize')
#            return
#
#        target = random.choice(active_vms)
#        resize_target = target[0]
#        print resize_target
#
#        _timeout = kwargs.get('timeout', 600)
#
#        # determine current flavor type, and resize to a different type:
#        # m1.tiny -> m1.small, m1.small -> m1.tiny
#        curr_size = int(resize_target['flavor']['id'])
#        if curr_size == 1:
#            new_size = 2
#        else:
#            new_size = 1
#        flavor_type = {'flavorRef': new_size}  # resize to the other flavor
#
#        post_body = json.dumps({'resize': flavor_type})
#        url = '/servers/%s/action' % resize_target['id']
#        (response, body) = manager.request('POST',
#                                           url,
#                                           body=post_body)
#
#        if (response.status != 202):
#            self._logger.error("response: %s" % response)
#            raise Exception
#
#        state_name = check_for_status(manager, resize_target, 'RESIZE')
#
#        if state_name == 'RESIZE':
#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
#                              resize_target['id'])
#            state.set_instance_state(resize_target['id'],
#                                     (resize_target, 'RESIZE'))
#
#        return VerifyResizeVM(manager,
#                              state,
#                              resize_target,
#                              state_name=state_name,
#                              timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingAction):
#    """Verify that resizing of a VM was successful"""
#    States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
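#    # Nova keeps a resized server in VERIFY_RESIZE until the client
#    # confirms with a confirmResize action; only then does it go ACTIVE.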
#
#    def __init__(self, manager, state, created_server,
#                 state_name=None,
#                 timeout=300):
#        super(VerifyResizeVM, self).__init__(manager,
#                                             state,
#                                             created_server,
#                                             timeout=timeout)
#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
#        self._state_name = state_name
#
#    def retry(self):
#        """
#        Check that the server was actually resized, and change `state`
#        to mark the server as running again.
#        """
#        # don't run resize verification if the target machine has been
#        # deleted or is going to be deleted
#        if (self._target['id'] not in self._state.get_instances().keys() or
#            self._state.get_instances()[self._target['id']][1] ==
#                'TERMINATING'):
#            self._logger.debug('machine %s is deleted or TERMINATING' %
#                               self._target['id'])
#            return True
#
#        if time.time() - self._start_time > self._timeout:
#            raise TimeoutException
#
#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
#                # now issue the command to confirm the resize
#                post_body = json.dumps({'confirmResize': None})
#                url = '/servers/%s/action' % self._target['id']
#                (response, body) = self._manager.request('POST',
#                                                         url,
#                                                         body=post_body)
#                if (response.status != 204):
#                    self._logger.error("response: %s" % response)
#                    raise Exception
#
#                self._logger.info(
#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
#                    (self._target['id'], time.time() - self._start_time)
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'CONFIRM_RESIZE'))
#
#                # change states
#                self._retry_state = self.States.ACTIVE_CHECK
#
#            return False
#
#        elif self._retry_state == self.States.ACTIVE_CHECK:
#            if not self._check_for_status("ACTIVE"):
#                return False
#            else:
#                server = self._manager.get_server(self._target['id'])
#
#                # Find the private IP of the server
#                try:
#                    (_, network) = server['addresses'].popitem()
#                    ip = network[0]['addr']
#                except KeyError:
#                    self._logger.error(
#                        'could not get ip address for machine %s' %
#                        self._target['id']
#                    )
#                    raise Exception
#
#                self._logger.info(
#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
#                    (self._target['id'], time.time() - self._start_time)
#                )
#                self._state.set_instance_state(self._target['id'],
#                                               (self._target, 'ACTIVE'))
#
#                return True
#
#        else:
#            # should never get here
#            self._logger.error('Unexpected state')
#            raise Exception