nova卸载volume源码分析
基于 lvm + iscsi 协议进行分析(注:此处是 LVM 逻辑卷管理,不是编译器 LLVM)
1、客户端接收请求并路由到 VolumeAttachmentController
其对应的 RESTful 请求格式如下:
DELETE /servers/{server_id}/os-volume_attachments/{volume_id}
nova-api处理该请求的入口函数为 nova.api.openstack.compute.volumes.VolumeAttachmentController.delete
nova/api/openstack/compute/volumes.py
from nova.compute import api as compute
class VolumeAttachmentController(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API()
self.volume_api = cinder.API()
super(VolumeAttachmentController, self).__init__() @wsgi.response(202)
@wsgi.expected_errors((400, 403, 404, 409))
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
context.can(va_policies.POLICY_ROOT % 'delete')
volume_id = id
instance = common.get_instance(self.compute_api, context, server_id,-------根据虚机uuid获取instance实例对象
expected_attrs=['device_metadata'])
if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
_check_request_version(req, '2.20', 'detach_volume',
server_id, instance.vm_state)
try:
volume = self.volume_api.get(context, volume_id)-----------调用cinderclient,根据卷uuid获取卷实例对象
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message()) try:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(-------获取block_device_mapping表 中该 instance挂在该卷的映射信息
context, volume_id, instance.uuid)
except exception.VolumeBDMNotFound:
msg = (_("Instance %(instance)s is not attached "
"to volume %(volume)s") %
{'instance': server_id, 'volume': volume_id})
raise exc.HTTPNotFound(explanation=msg) if bdm.is_root:
msg = _("Cannot detach a root device volume")
raise exc.HTTPBadRequest(explanation=msg) try:
self.compute_api.detach_volume(context, instance, volume)----s1 nova-api处理过程
.....
s1 nova-api处理过程详解
nova api服务调用nova-compute服务对外的呈现接口
nova.compute.api.API.detach_volume
nova/compute/api.py
class API(base.Base):
    # Excerpt from nova/compute/api.py as quoted by the article; indentation
    # restored from the flattened scrape, annotations translated to English.

    def detach_volume(self, context, instance, volume):
        """Detach a volume from an instance.

        Dispatches on vm_state: shelved-offloaded instances take a dedicated
        path, everything else goes through _detach_volume.
        """
        if instance.vm_state == vm_states.SHELVED_OFFLOADED:
            self._detach_volume_shelved_offloaded(context, instance, volume)
        else:
            # Normal (non-shelved) case takes this branch (per the article).
            self._detach_volume(context, instance, volume)

    def _detach_volume(self, context, instance, volume):
        """Detach volume from instance.

        This method is separated to make it easier for cells version
        to override.
        """
        try:
            # Cinder API call; per the article's annotation this marks the
            # volume 'detaching' in the cinder database.
            self.volume_api.begin_detaching(context, volume['id'])
        except exception.InvalidInput as exc:
            raise exception.InvalidVolume(reason=exc.format_message())
        # Pull this instance's attachment_id out of the volume dict, if the
        # volume carries per-instance attachment info.
        attachments = volume.get('attachments', {})
        attachment_id = None
        if attachments and instance.uuid in attachments:
            attachment_id = attachments[instance.uuid]['attachment_id']
        # Record this operation in the instance-actions log.
        self._record_action_start(context, instance, instance_actions.DETACH_VOLUME)
        # Send the detach-volume request to nova-compute over RPC.
        self.compute_rpcapi.detach_volume(context, instance=instance,
                                          volume_id=volume['id'],
                                          attachment_id=attachment_id)
2、nova-compute服务接收发送过来的卸载卷rpc请求并处理,其处理入口函数为
nova/compute/manager.py
class ComputeManager(manager.Manager):
    # Excerpt from nova/compute/manager.py as quoted by the article;
    # indentation restored, fused lines split, annotations translated.

    def detach_volume(self, context, volume_id, instance, attachment_id):
        """Detach a volume from an instance.

        :param context: security context
        :param volume_id: the volume id
        :param instance: the Instance object to detach the volume from
        :param attachment_id: The volume attachment_id for the given instance
                              and volume.
        """
        # Serialize on the instance uuid so concurrent volume operations on
        # the same instance do not interleave.
        @utils.synchronized(instance.uuid)
        def do_detach_volume(context, volume_id, instance, attachment_id):
            # Look up this instance's block_device_mapping row for the volume.
            bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                context, volume_id, instance.uuid)
            self._detach_volume(context, bdm, instance,
                                attachment_id=attachment_id)

        # The real work happens in _detach_volume, via the locked wrapper.
        do_detach_volume(context, volume_id, instance, attachment_id)

    def _detach_volume(self, context, bdm, instance, destroy_bdm=True,
                       attachment_id=None):
        """Detach a volume from an instance.
        """
        volume_id = bdm.volume_id
        # Versioned notification: volume.detach START.
        compute_utils.notify_about_volume_attach_detach(
            context, instance, self.host,
            action=fields.NotificationAction.VOLUME_DETACH,
            phase=fields.NotificationPhase.START,
            volume_id=volume_id)
        self._notify_volume_usage_detach(context, instance, bdm)
        LOG.info('Detaching volume %(volume_id)s',
                 {'volume_id': volume_id}, instance=instance)
        # Wrap the bdm in its driver object; per the article's annotation the
        # result is a nova.virt.block_device.DriverVolumeBlockDevice.
        driver_bdm = driver_block_device.convert_volume(bdm)
        # s1: delegate the actual detach to the bdm driver; self.driver is the
        # hypervisor driver (LibvirtDriver in this analysis).
        driver_bdm.detach(context, instance, self.volume_api, self.driver,
                          attachment_id=attachment_id, destroy_bdm=destroy_bdm)
        info = dict(volume_id=volume_id)
        # Legacy (unversioned) notification.
        self._notify_about_instance_usage(
            context, instance, "volume.detach", extra_usage_info=info)
        # Versioned notification: volume.detach END.
        compute_utils.notify_about_volume_attach_detach(
            context, instance, self.host,
            action=fields.NotificationAction.VOLUME_DETACH,
            phase=fields.NotificationPhase.END,
            volume_id=volume_id)
        if 'tag' in bdm and bdm.tag:
            self._delete_disk_metadata(instance, bdm)
        if destroy_bdm:
            # Mark this bdm row deleted in the nova database.
            bdm.destroy()
s1 BDM driver detach操作
nova/virt/block_device.py
class DriverVolumeBlockDevice(DriverBlockDevice):
    # Excerpt from nova/virt/block_device.py as quoted by the article;
    # indentation restored, fused lines split, annotations translated.

    def detach(self, context, instance, volume_api, virt_driver,
               attachment_id=None, destroy_bdm=False):
        """Detach this volume, serializing per storage service when the
        backend reports shared targets.
        """
        volume = self._get_volume(context, volume_api, self.volume_id)
        if volume.get('shared_targets', False):
            # Lock the detach call using the provided service_uuid.
            @utils.synchronized(volume['service_uuid'])
            def _do_locked_detach(*args, **_kwargs):
                self._do_detach(*args, **_kwargs)

            _do_locked_detach(context, instance, volume_api, virt_driver,
                              attachment_id, destroy_bdm)
        else:
            # We don't need to (or don't know if we need to) lock.
            # (Per the article: this branch was taken while debugging.)
            self._do_detach(context, instance, volume_api, virt_driver,
                            attachment_id, destroy_bdm)

    def _do_detach(self, context, instance, volume_api, virt_driver,
                   attachment_id=None, destroy_bdm=False):
        """Private method that actually does the detach.

        This is separate from the detach() method so the caller can optionally
        lock this call.
        """
        volume_id = self.volume_id
        # Only attempt to detach and disconnect from the volume if the instance
        # is currently associated with the local compute host.
        if CONF.host == instance.host:
            # s1.1: hypervisor-side detach (remove the device from the guest).
            self.driver_detach(context, instance, volume_api, virt_driver)
        elif not destroy_bdm:
            LOG.debug("Skipping driver_detach during remote rebuild.",
                      instance=instance)
        elif destroy_bdm:
            LOG.error("Unable to call for a driver detach of volume "
                      "%(vol_id)s due to the instance being "
                      "registered to the remote host %(inst_host)s.",
                      {'vol_id': volume_id,
                       'inst_host': instance.host}, instance=instance)

        # NOTE(jdg): For now we need to actually inspect the bdm for an
        # attachment_id as opposed to relying on what may have been passed
        # in, we want to force usage of the old detach flow for now and only
        # use the new flow when we explicitly used it for the attach.
        if not self['attachment_id']:
            connector = virt_driver.get_volume_connector(instance)
            connection_info = self['connection_info']
            if connection_info and not destroy_bdm and (
                    connector.get('host') != instance.host):
                # If the volume is attached to another host (evacuate) then
                # this connector is for the wrong host. Use the connector that
                # was stored in connection_info instead (if we have one, and it
                # is for the expected host).
                stashed_connector = connection_info.get('connector')
                if not stashed_connector:
                    # Volume was attached before we began stashing connectors
                    LOG.warning("Host mismatch detected, but stashed "
                                "volume connector not found. Instance host is "
                                "%(ihost)s, but volume connector host is "
                                "%(chost)s.",
                                {'ihost': instance.host,
                                 'chost': connector.get('host')})
                elif stashed_connector.get('host') != instance.host:
                    # Unexpected error. The stashed connector is also not
                    # matching the needed instance host.
                    LOG.error("Host mismatch detected in stashed volume "
                              "connector. Will use local volume connector. "
                              "Instance host is %(ihost)s. Local volume "
                              "connector host is %(chost)s. Stashed volume "
                              "connector host is %(schost)s.",
                              {'ihost': instance.host,
                               'chost': connector.get('host'),
                               'schost': stashed_connector.get('host')})
                else:
                    # Fix found. Use stashed connector.
                    LOG.debug("Host mismatch detected. Found usable stashed "
                              "volume connector. Instance host is %(ihost)s. "
                              "Local volume connector host was %(chost)s. "
                              "Stashed volume connector host is %(schost)s.",
                              {'ihost': instance.host,
                               'chost': connector.get('host'),
                               'schost': stashed_connector.get('host')})
                    connector = stashed_connector

            # s1.2: cinder os-terminate_connection request — per the article,
            # cinder removes the backend export/mapping for this connector.
            volume_api.terminate_connection(context, volume_id, connector)
            # s1.3: cinder os-detach request — per the article, updates the
            # volume's state in the cinder database.
            volume_api.detach(context.elevated(), volume_id, instance.uuid,
                              attachment_id)
        else:
            # Attachment-id present: delete the cinder attachment record.
            volume_api.attachment_delete(context, self['attachment_id'])

s1.1 虚机端卸载卷操作
self.driver_detach(context, instance, volume_api, virt_driver)
nova/virt/block_device.py
class DriverVolumeBlockDevice(DriverBlockDevice):
    # Excerpt (same class as above) from nova/virt/block_device.py;
    # indentation restored, annotations translated to English.

    def driver_detach(self, context, instance, volume_api, virt_driver):
        """Detach the volume from the guest via the hypervisor driver."""
        # virt_driver here is nova.virt.libvirt.driver.LibvirtDriver
        # (per the article's annotation).
        connection_info = self['connection_info']
        mp = self['mount_device']
        volume_id = self.volume_id
        LOG.info('Attempting to driver detach volume %(volume_id)s from '
                 'mountpoint %(mp)s', {'volume_id': volume_id, 'mp': mp},
                 instance=instance)
        try:
            if not virt_driver.instance_exists(instance):
                LOG.warning('Detaching volume from unknown instance',
                            instance=instance)
            encryption = encryptors.get_encryption_metadata(context,
                volume_api, volume_id, connection_info)
            # s1.1.1: delegate to the libvirt driver's detach_volume.
            virt_driver.detach_volume(context, connection_info, instance, mp,
                                      encryption=encryption)
        except exception.DiskNotFound as err:
            # Disk already gone from the guest — log and treat as success.
            LOG.warning('Ignoring DiskNotFound exception while '
                        'detaching volume %(volume_id)s from '
                        '%(mp)s : %(err)s',
                        {'volume_id': volume_id, 'mp': mp,
                         'err': err}, instance=instance)
        except exception.DeviceDetachFailed as err:
            with excutils.save_and_reraise_exception():
                LOG.warning('Guest refused to detach volume %(vol)s',
                            {'vol': volume_id}, instance=instance)
                # Roll the cinder volume state back out of 'detaching'.
                volume_api.roll_detaching(context, volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Failed to detach volume '
                              '%(volume_id)s from %(mp)s',
                              {'volume_id': volume_id, 'mp': mp},
                              instance=instance)
                volume_api.roll_detaching(context, volume_id)
s1.1.1 详解
nova/virt/libvirt/driver.py
class LibvirtDriver(driver.ComputeDriver):
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
disk_dev = mountpoint.rpartition("/")[2]
try:
guest = self._host.get_guest(instance)-------获取虚机的xml信息
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
wait_for_detach = guest.detach_device_with_retry(guest.get_disk,disk_dev,live=live)----s1删除xml文件中挂载卷的信息
wait_for_detach()
self._disconnect_volume(context, connection_info, instance,-------s2主机端执行issci logout操作
encryption=encryption)
s2主机端执行iscsi logout操作
nova/virt/libvirt/driver.py
class LibvirtDriver(driver.ComputeDriver):
    # Excerpt from nova/virt/libvirt/driver.py as quoted by the article;
    # indentation restored from the flattened scrape.

    def _disconnect_volume(self, context, connection_info, instance,
                           encryption=None):
        """Disconnect this host from the volume's backend target.

        Per the article, for iscsi volumes this ends in an iscsi logout
        performed by the selected volume driver.
        """
        # Tear down any volume encryptor first.
        self._detach_encryptor(context, connection_info, encryption=encryption)
        # Only disconnect the target if no other attachment on this host still
        # uses it; otherwise skip the disconnect and just log.
        if self._should_disconnect_target(context, connection_info, instance):
            # Driver is chosen from connection_info's driver_volume_type; for
            # iscsi the article says this is LibvirtISCSIVolumeDriver.
            vol_driver = self._get_volume_driver(connection_info)
            vol_driver.disconnect_volume(connection_info, instance)
        else:
            LOG.info("Detected multiple connections on this host for volume: "
                     "%s, skipping target disconnect.",
                     driver_block_device.get_volume_id(connection_info),
                     instance=instance)
根据block_device_mapping中,connection_info的 driver_volume_type 类型来获取对应的驱动,
由于使用的是iscsi协议,因此找的 LibvirtISCSIVolumeDriver
最终走的是vol_driver.disconnect_volume=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver.disconnect_volume
nova卸载volume源码分析的更多相关文章
- nova挂载volume源码分析
当nova volume-attach instance_uuid volume_uuid 执行后,主要流程如下: 使用的存储类型是lvm+iscsi 1.nova client解析该命令行,通过re ...
- nova创建虚拟机源码分析系列之七 传入参数转换成内部id
上一篇博文将nova创建虚机的流程推进到了/compute/api.py中的create()函数,接下来就继续分析. 在分析之前简单介绍nova组件源码的架构.以conductor组件为例: 每个组件 ...
- nova创建虚拟机源码分析系列之五 nova源码分发实现
前面讲了很多nova restful的功能,无非是为本篇博文分析做铺垫.本节说明nova创建虚拟机的请求发送到openstack之后,nova是如何处理该条URL的请求,分析到处理的类. nova对于 ...
- nova创建虚拟机源码分析系列之三 PasteDeploy
上一篇博文介绍WSGI在nova创建虚拟机过程的作用是解析URL,是以一个最简单的例子去给读者有一个印象.在openstack中URL复杂程度也大大超过上一个例子.所以openstack使用了Past ...
- nova创建虚拟机源码分析系列之一 restful api
开始学习openstack源码,源码文件多,分支不少.按照学习的方法走通一条线是最好的,而网上推荐的最多的就是nova创建虚机的过程.从这一条线入手,能够贯穿openstack核心服务.写博文仅做学习 ...
- nova创建虚拟机源码分析系列之六 api入口create方法
openstack 版本:Newton 注:博文图片采用了很多大牛博客图片,仅作为总结学习,非商用.该图全面的说明了nova创建虚机的过程,从逻辑的角度清晰的描述了前端请求创建虚拟机之后发生的一系列反 ...
- nova创建虚拟机源码分析系列之四 nova代码模拟
在前面的三篇博文中,介绍了restful和SWGI的实现.结合restful和WSGI配置就能够简单的实现nova服务模型的最简单的操作. 如下的内容是借鉴网上博文,因为写的很巧妙,将nova管理虚拟 ...
- nova创建虚拟机源码分析系列之八 compute创建虚机
/conductor/api.py _build_instance() /conductor/rpcapi.py _build_instance() 1 构造一些数据类型2 修改一些api版本信息 ...
- Openstack Nova 源码分析 — 使用 VCDriver 创建 VMware Instance
目录 目录 前言 流程图 nova-compute vCenter 前言 在上一篇Openstack Nova 源码分析 - Create instances (nova-conductor阶段)中, ...
随机推荐
- Android集成JPush(极光推送)
目前只是简单的集成 1.在极光推送官网注册用户 2.创建应用 3.配置包名,获得APPKEY 去设置 输入应用包名 确定然后返回查看APPKEY 3.在应用中集成极光推送 用的jcenter自动集成的 ...
- SQLyog无操作一段时间后重新操作会卡死问题(解决办法)
这种是因为一段时间不操作后,服务器将空闲连接丢弃了,而客户端(sqlyog)不知道,导致长时间无响应,而超时之后,sqlyog 使用了新的连接,所以又可以顺畅操作了. 将会话空闲时间默认改为自定义,填 ...
- PHP系列之钩子
PHP 提供的钩子 PHP 和 Zend Engine 为扩展提供了许多不同的钩子,这些扩展允许扩展开发人员以 PHP userland 无法提供的方式控制 PHP 运行时. 本章将展示各种钩子和从扩 ...
- PHP 超级全局变量讲解
PHP 超级全局变量 超级全局变量在PHP 4.1.0之后被启用, 是PHP系统中自带的变量,在一个脚本的全部作用域中都可用. PHP 超级全局变量 PHP中预定义了几个超级全局变量(superglo ...
- [草稿]Skill 中的map
https://www.cnblogs.com/yeungchie/ Skill 中的map map mapc mapcan mapcar mapcon mapinto maplist
- Jdbc与Dao和Javabean的区别
JDBC(Java Data Base Connectivity,java数据库连接)是一种用于执行SQL语句的Java API,可以为多种关系数据库提供统一访问,它由一组用Java语言编写的类和接口 ...
- x86架构:从实模式进入保护模式
详细的过程说明参考:(1) https://www.cnblogs.com/Philip-Tell-Truth/p/5211248.html (2)x86汇编:从实模式到保护模式 这里简化一下 ...
- mnist手写数字识别——深度学习入门项目(tensorflow+keras+Sequential模型)
前言 今天记录一下深度学习的另外一个入门项目——<mnist数据集手写数字识别>,这是一个入门必备的学习案例,主要使用了tensorflow下的keras网络结构的Sequential模型 ...
- 实验11——java线程模拟卖票
package cn.tedu.demo; /** * @author 赵瑞鑫 E-mail:1922250303@qq.com * @version 1.0 * @创建时间:2020年7月31日 下 ...
- Android BottomNavigationView的用法
BottomNavigationView是相当于一个导航的标签,但是它的形式就是像QQ,微信之类的界面 这三个图标就是BottomNavigationView的体现. 至于写出后怎样绑定这三个界面,就 ...