Some findings from learning pycuda: CUDA functions must be initialized after the CUDA memory space has been initialized, otherwise an error is raised
Reference:
https://www.cnblogs.com/devilmaycry812839668/p/15348610.html
I have recently been reading the WarpDrive source code. The code that runs on CUDA is wired up through the pycuda library, which makes it easy to call CUDA code from a Python environment. While using it, however, I noticed one thing: the CUDA functions must be initialized after the CUDA memory space has been initialized, otherwise an error is raised.
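Before the WarpDrive version, here is a minimal, self-contained pycuda sketch of the same increment kernel, independent of WarpDrive (the variable names and the launch configuration are mine, not WarpDrive's). Because it imports pycuda.autoinit, the CUDA context is created up front, so the memory-vs-function ordering issue discussed below never shows up in this form:

import numpy as np
import pycuda.autoinit  # creates the CUDA context before anything else
import pycuda.driver as drv
from pycuda.compiler import SourceModule

# Same kernel as in the WarpDrive tutorial below; SourceModule wraps it in extern "C" by default.
mod = SourceModule("""
__global__ void cuda_increment(float *data, int num_agents)
{
    int env_id = blockIdx.x;
    int agent_id = threadIdx.x;
    if (agent_id < num_agents) {
        data[env_id * num_agents + agent_id] += env_id + agent_id;
    }
}
""")
increment = mod.get_function("cuda_increment")

num_envs, num_agents = 2, 3
data = np.random.rand(num_envs, num_agents).astype(np.float32)

d_data = drv.mem_alloc(data.nbytes)   # allocate device memory
drv.memcpy_htod(d_data, data)         # push data from host to device

# one block per environment, one thread per agent
increment(d_data, np.int32(num_agents), block=(num_agents, 1, 1), grid=(num_envs, 1))

result = np.empty_like(data)
drv.memcpy_dtoh(result, d_data)       # pull the incremented data back
print(result)

The WarpDrive managers used below wrap the same kind of steps (compile, allocate, copy, launch), which is why the order in which the managers are constructed turns out to matter.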
Code (this version runs correctly):
import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)
    cuda_data_manager = push_random_data(num_agents, num_envs)
    cuda_function_manager, increment_function = cuda_func_init()
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''
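As a sanity check on the printed result, the kernel's arithmetic can be restated in NumPy: each (env_id, agent_id) entry is incremented by env_id + agent_id. The helper below is mine, not part of WarpDrive, and simply mirrors the kernel's indexing:

import numpy as np

def increment_reference(data):
    # data has shape (num_envs, num_agents); mirror cuda_increment on the CPU
    num_envs, num_agents = data.shape
    env_ids = np.arange(num_envs).reshape(-1, 1)      # plays the role of blockIdx.x
    agent_ids = np.arange(num_agents).reshape(1, -1)  # plays the role of threadIdx.x
    return data + env_ids + agent_ids

Note that the timing loop above launches the kernel num_runs times, so the array pulled back with pull_data_from_device('random_data') should equal random_data + num_runs * (env_ids + agent_ids), not a single application of increment_reference.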
Code that raises the error:
import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)
    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": 0,  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''
Error message:
Traceback (most recent call last):
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 145, in <module>
    source_code
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 116, in push_random_data_and_increment_timer
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
  File "/home/xxxxxx/anaconda3/envs/warp_drive/lib/python3.7/timeit.py", line 177, in timeit
    timing = self.inner(it, self.timer)
  File "<timeit-src>", line 6, in inner
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 116, in <lambda>
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
  File "/home/xxxxxx/warp-drive/devil_make/tutorial-1-warp_drive_basics.py", line 97, in increment_data
    grid=cuda_function_manager.grid
  File "/home/xxxxxx/anaconda3/envs/warp_drive/lib/python3.7/site-packages/pycuda/driver.py", line 480, in function_call
    func._set_block_shape(*block)
pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle
From this we can conclude that, when using pycuda this way, initializing the CUDA functions before any CUDA memory has been initialized raises the error:
pycuda._driver.LogicError: cuFuncSetBlockShape failed: invalid resource handle
A plausible explanation (an assumption on my part, not verified against WarpDrive's internals) is that pushing data to the device is what establishes the CUDA context; a kernel module compiled before that context exists ends up bound to a different (or missing) context, and launching it later fails with an invalid resource handle. If the CUDA memory is initialized before the CUDA functions, the error does not occur.
Code:
import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    cuda_function_manager, increment_function = cuda_func_init()  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": '0',  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''
Curiously, it does not matter how much memory is initialized before the CUDA function initialization: even the smallest allocation is enough, and the error never appears again. That is what makes this issue so puzzling.
For example, the following code:
import numpy as np

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import (
    CUDAFunctionManager, CUDALogController, CUDASampler, CUDAEnvironmentReset
)
from warp_drive.utils.data_feed import DataFeed

source_code = """
// A function to demonstrate how to manipulate data on the GPU.
// This function increments each the random data array we pushed to the GPU before.
// Each index corresponding to (env_id, agent_id) in the array is incremented by "agent_id + env_id".
// Everything inside the if() loop runs in parallel for each agent and environment.
//
extern "C"{
    __global__ void cuda_increment(
        float* data,
        int num_agents
    )
    {
        int env_id = blockIdx.x;
        int agent_id = threadIdx.x;
        if (agent_id < num_agents){
            int array_index = env_id * num_agents + agent_id;
            int increment = env_id + agent_id;
            data[array_index] += increment;
        }
    }
}
"""

from timeit import Timer


def push_random_data_and_increment_timer(
    num_runs=1,
    num_envs=2,
    num_agents=3,
    source_code=None
):
    assert source_code is not None

    def push_random_data(num_agents, num_envs):
        # Initialize the CUDA data manager
        cuda_data_manager = CUDADataManager(
            num_agents=num_agents,
            num_envs=num_envs,
            episode_length=100
        )
        # Create random data
        random_data = np.random.rand(num_envs, num_agents)
        # Push data from host to device
        data_feed = DataFeed()
        data_feed.add_data(
            name="random_data",
            data=random_data,
        )
        data_feed.add_data(
            name="num_agents",
            data=num_agents
        )
        cuda_data_manager.push_data_to_device(data_feed)
        return cuda_data_manager

    # Initialize the CUDA function manager
    def cuda_func_init():
        cuda_function_manager = CUDAFunctionManager(
            num_agents=num_agents,  # cuda_data_manager.meta_info("n_agents"),
            num_envs=num_envs  # cuda_data_manager.meta_info("n_envs")
        )
        # Load source code and initialize function
        cuda_function_manager.load_cuda_from_source_code(
            source_code,
            default_functions_included=False
        )
        cuda_function_manager.initialize_functions(["cuda_increment"])
        increment_function = cuda_function_manager._get_function("cuda_increment")
        return cuda_function_manager, increment_function

    def increment_data(cuda_data_manager, cuda_function_manager, increment_function):
        increment_function(
            cuda_data_manager.device_data("random_data"),
            cuda_data_manager.device_data("num_agents"),
            block=cuda_function_manager.block,
            grid=cuda_function_manager.grid
        )

    # set variable
    # cuda_data_manager = push_random_data(num_agents, num_envs)
    # cuda function init
    # cuda_function_manager, increment_function = cuda_func_init()
    # cuda function run
    # increment_data(cuda_data_manager, cuda_function_manager, increment_function)

    # data_push_time = Timer(lambda: push_random_data(num_agents, num_envs)).timeit(number=num_runs)
    push_random_data(1, 1)
    cuda_function_manager, increment_function = cuda_func_init()  ###
    cuda_data_manager = push_random_data(num_agents, num_envs)  ###
    program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
    print(cuda_data_manager.pull_data_from_device('random_data'))

    return {
        "data push times": '0',  # data_push_time,
        "code run time": program_run_time
    }


num_runs = 1000
times = {}

for scenario in [
    (1, 1),
    (1, 100),
    (1, 1000),
    (100, 1000),
    (1000, 1000)
]:
    num_envs, num_agents = scenario
    times.update(
        {
            f"envs={num_envs}, agents={num_agents}":
                push_random_data_and_increment_timer(
                    num_runs,
                    num_envs,
                    num_agents,
                    source_code
                )
        }
    )

print(f"Times for {num_runs} function calls")
print("*" * 40)
for key, value in times.items():
    print(f"{key:30}: mean data push times: {value['data push times']:10.5}s,\t mean increment times: {value['code run time']:10.5}s")

'''
print(cuda_data_manager._meta_info)
print(cuda_data_manager._host_data)
print(cuda_data_manager._device_data_pointer)
print(cuda_data_manager._scalar_data_list)
print(cuda_data_manager._reset_data_list)
print(cuda_data_manager._log_data_list)
print(cuda_data_manager._device_data_via_torch)
print(cuda_data_manager._shared_constants)
print(cuda_data_manager._shape)
print(cuda_data_manager._dtype)
print(tensor_on_device)
time.sleep(300)
'''
Core code:
push_random_data(1, 1)
cuda_function_manager, increment_function = cuda_func_init()  ###
cuda_data_manager = push_random_data(num_agents, num_envs)  ###
program_run_time = Timer(lambda: increment_data(cuda_data_manager, cuda_function_manager, increment_function)).timeit(number=num_runs)
The line below is the CUDA memory allocation. Even a very small allocation is enough for the subsequent CUDA function initialization to run normally; if there is no CUDA memory allocation at all before the CUDA function initialization, the error above is raised.
push_random_data(1, 1)
Initializing CUDA memory (the CUDA memory allocation):
push_random_data(1, 1)
Initializing the CUDA functions:
cuda_function_manager, increment_function = cuda_func_init() ###
Executing the CUDA function:
increment_data(cuda_data_manager, cuda_function_manager, increment_function)
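If the root cause really is a context mismatch (again, an assumption rather than something verified in WarpDrive's source), the same class of failure can be reproduced with plain pycuda by compiling a module in one context and launching its function from another. The sketch below is illustrative only; depending on the pycuda/CUDA version, the error may surface at a slightly different call than cuFuncSetBlockShape, but it is the same invalid-handle failure:

import pycuda.driver as drv
from pycuda.compiler import SourceModule

drv.init()
dev = drv.Device(0)

ctx_a = dev.make_context()                      # context A becomes current
mod = SourceModule("__global__ void noop(float *x) {}")
noop = mod.get_function("noop")                 # function handle belongs to context A
ctx_a.pop()

ctx_b = dev.make_context()                      # context B becomes current
buf = drv.mem_alloc(4)                          # memory allocated in context B
try:
    noop(buf, block=(1, 1, 1), grid=(1, 1))     # launch from context B: invalid resource handle
except drv.LogicError as err:
    print("launch failed as expected:", err)
finally:
    ctx_b.pop()
    ctx_b.detach()
    ctx_a.detach()

This is consistent with the observation above: pushing data first apparently ensures that the context created by the data manager is already current when CUDAFunctionManager compiles the module, so the function handle and the device data end up in the same context.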