Today, let's take a look at how the HotSpot VM initializes the Java heap.

Universe

Java heap initialization is driven mainly by the Universe module. Let's start with the module's initialization code, universe_init:

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock();  // do not allow gc during bootstrapping

  JavaClasses::compute_hard_coded_offsets();

  // Get map info from shared archive file.
  if (DumpSharedSpaces)
    UseSharedSpaces = false;

  FileMapInfo* mapinfo = NULL;
  if (UseSharedSpaces) {
    mapinfo = NEW_C_HEAP_OBJ(FileMapInfo, mtInternal);
    memset(mapinfo, 0, sizeof(FileMapInfo));

    // Open the shared archive file, read and validate the header. If
    // initialization fails, shared spaces [UseSharedSpaces] are
    // disabled and the file is closed.
    if (mapinfo->initialize()) {
      FileMapInfo::set_current_info(mapinfo);
    } else {
      assert(!mapinfo->is_open() && !UseSharedSpaces,
             "archive file not closed or shared spaces not disabled.");
    }
  }

  ////////////////////////////////////////
  // initialize_heap()
  ////////////////////////////////////////
  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  // We have a heap so create the methodOop caches before
  // CompactingPermGenGen::initialize_oops() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodOopCache();
  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
  Universe::_pd_implies_cache         = new LatestMethodOopCache();
  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.). After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    CompactingPermGenGen::initialize_oops();
    mapinfo->close();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();
  }

  return JNI_OK;
}

The key call here is the initialize_heap method:

jint Universe::initialize_heap() {

  ////////////////////////////////////////
  // -XX:+UseParallelGC
  ////////////////////////////////////////
  if (UseParallelGC) {
#ifndef SERIALGC
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // SERIALGC
    fatal("UseParallelGC not supported in java kernel vm.");
#endif // SERIALGC

  ////////////////////////////////////////
  // -XX:+UseG1GC
  ////////////////////////////////////////
  } else if (UseG1GC) {
#ifndef SERIALGC
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // SERIALGC
    fatal("UseG1GC not supported in java kernel vm.");
#endif // SERIALGC

  } else {
    GenCollectorPolicy* gc_policy;

    ////////////////////////////////////////
    // -XX:+UseSerialGC
    ////////////////////////////////////////
    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();

    ////////////////////////////////////////
    // -XX:+UseConcMarkSweepGC
    ////////////////////////////////////////
    } else if (UseConcMarkSweepGC) {
#ifndef SERIALGC
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else  // SERIALGC
      fatal("UseConcMarkSweepGC not supported in java kernel vm.");
#endif // SERIALGC
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  ////////////////////////////////////////
  // Heap initialization
  ////////////////////////////////////////
  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
    }
    if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
      // Can't reserve heap below 32Gb.
      Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      if (verbose) {
        tty->print(", %s: " PTR_FORMAT,
                   narrow_oop_mode_to_string(HeapBasedNarrowOop),
                   Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (verbose) {
        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif // _WIN64
      if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (verbose) {
          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
        }
      }
    }
    if (verbose) {
      tty->cr();
      tty->cr();
    }
  }
  assert(Universe::narrow_oop_base() == (Universe::heap()->base() - os::vm_page_size()) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  ////////////////////////////////////////
  // -XX:+UseTLAB
  ////////////////////////////////////////
  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}

In short, initialize_heap chooses a CollectedHeap implementation based on the GC-related options: ParallelScavengeHeap for -XX:+UseParallelGC, G1CollectedHeap for -XX:+UseG1GC, and GenCollectedHeap for the serial and CMS collectors. Each CollectedHeap type comes with its own CollectorPolicy. The class hierarchy looks like this:

// CollectedHeap
// SharedHeap
// GenCollectedHeap
// G1CollectedHeap
// ParallelScavengeHeap

It then initializes the chosen CollectedHeap and, when -XX:+UseTLAB is set, the thread-local allocation buffers (TLABs); a minimal sketch of what a TLAB buys us follows.
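
Here is a hedged, minimal sketch (illustrative only, not HotSpot code) of the bump-the-pointer allocation a TLAB enables: each thread owns a private buffer, so the fast path is just a pointer increment with no synchronization.

#include <cstddef>
#include <cstdio>

struct Tlab {
  char* top;  // next free byte in this thread's buffer
  char* end;  // one past the last usable byte
  void* allocate(size_t bytes) {
    // Exhausted: the real VM would retire this TLAB and take a slow path.
    if (bytes > size_t(end - top)) return nullptr;
    void* obj = top;
    top += bytes;  // the "bump": allocation is one pointer increment
    return obj;
  }
};

int main() {
  static char buffer[1024];
  Tlab tlab{ buffer, buffer + sizeof(buffer) };
  void* a = tlab.allocate(64);   // fast-path allocation, no locking
  void* b = tlab.allocate(128);
  std::printf("a=%p b=%p\n", a, b);
  return 0;
}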

Next, let's walk through the initialization of GenCollectedHeap.

First, the CollectorPolicy used with CMS, namely ConcurrentMarkSweepPolicy.

ConcurrentMarkSweepPolicy

Its constructor:

ConcurrentMarkSweepPolicy::ConcurrentMarkSweepPolicy() {
  initialize_all();
}

initialize_all

  virtual void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_generations();
  }

ConcurrentMarkSweepPolicy inherits from TwoGenerationCollectorPolicy, which inherits from GenCollectorPolicy, which in turn inherits from CollectorPolicy.

TwoGenerationCollectorPolicy carries the following comment:

// All of hotspot's current collectors are subtypes of this
// class. Currently, these collectors all use the same gen[0],
// but have different gen[1] types. If we add another subtype
// of CollectorPolicy, this class should be broken out into
// its own file.

The gen[0] mentioned above is the young generation (YoungGen) and gen[1] is the old generation (OldGen). In other words, the collectors built on this policy all share the same young-generation setup and differ only in their old-generation types.

initialize_all

Now let's see what initialize_all actually does. First up is initialize_flags; the call chain looks like this:

//3. TwoGenerationCollectorPolicy::initialize_flags
//2. =>GenCollectorPolicy::initialize_flags
//1. =>CollectorPolicy::initialize_flags

initialize_flags validates and adjusts the GC sizing parameters given on the command line, namely (a hedged sketch of the clamping follows the list):

  1. the PermGen size (PermSize, MaxPermSize)
  2. the YoungGen size (NewSize, MaxNewSize)
  3. the OldGen size (OldSize) and MaxHeapSize (-Xmx)
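
As promised, here is a simplified sketch of the kind of clamping initialize_flags performs. The struct and function names are hypothetical; only the align-and-clamp idea is taken from the source.

#include <algorithm>
#include <cstddef>

// Round size up to the next multiple of alignment.
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) / alignment * alignment;
}

struct GcSizeFlags {  // hypothetical mirrors of the VM flags
  size_t NewSize, MaxNewSize, OldSize, MaxHeapSize;
};

void initialize_flags_sketch(GcSizeFlags& f, size_t gen_alignment) {
  f.NewSize    = align_up(f.NewSize, gen_alignment);
  f.MaxNewSize = std::max(align_up(f.MaxNewSize, gen_alignment), f.NewSize);
  f.OldSize    = align_up(f.OldSize, gen_alignment);
  // The whole heap must be able to hold at least young + old.
  f.MaxHeapSize = std::max(f.MaxHeapSize, f.NewSize + f.OldSize);
}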

Next comes initialize_size_info:

//3. TwoGenerationCollectorPolicy::initialize_size_info
//2. =>GenCollectorPolicy::initialize_size_info
//1. =>CollectorPolicy::initialize_size_info

initialize_size_info takes the adjusted flags and sets the concrete size fields, namely (see the sketch after the list):

  1. _initial_heap_byte_size, _min_heap_byte_size, _max_heap_byte_size
  2. _initial_gen0_size, _min_gen0_size, _max_gen0_size
  3. _initial_gen1_size, _min_gen1_size, _max_gen1_size
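
For a two-generation policy, a reasonable mental model (a hedged sketch with hypothetical names, not the HotSpot source) is that gen1 gets whatever the heap leaves over after gen0:

#include <cstddef>

struct Sizes { size_t min, initial, max; };

// gen1 (the old generation) is sized as the heap minus gen0.
Sizes derive_gen1(const Sizes& heap, const Sizes& gen0) {
  return { heap.min     - gen0.min,
           heap.initial - gen0.initial,
           heap.max     - gen0.max };
}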

Finally there is initialize_generations, which builds the GenerationSpec array: gen[0] becomes ParNew (or ASParNew under adaptive sizing) when ParNew is in use and DefNew otherwise, while gen[1] is the CMS generation:

void ConcurrentMarkSweepPolicy::initialize_generations() {
  initialize_perm_generation(PermGen::ConcurrentMarkSweep);
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (ParNewGeneration::in_use()) {
    if (UseAdaptiveSizePolicy) {
      _generations[0] = new GenerationSpec(Generation::ASParNew,
                                           _initial_gen0_size, _max_gen0_size);
    } else {
      _generations[0] = new GenerationSpec(Generation::ParNew,
                                           _initial_gen0_size, _max_gen0_size);
    }
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew,
                                         _initial_gen0_size, _max_gen0_size);
  }
  if (UseAdaptiveSizePolicy) {
    _generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
                                         _initial_gen1_size, _max_gen1_size);
  } else {
    _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
                                         _initial_gen1_size, _max_gen1_size);
  }

  if (_generations[0] == NULL || _generations[1] == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

GenCollectedHeap

Now we can look at the initialization of GenCollectedHeap itself, in GenCollectedHeap::initialize:

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t gen_alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  size_t heap_alignment = collector_policy()->max_alignment();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }
  perm_gen_spec->align(heap_alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address. This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.
  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  ////////////////////////////////////////
  // Memory allocation starts here
  ////////////////////////////////////////
  // Allocate space for the heap.
  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;
  heap_address = allocate(heap_alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(heap_alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something's in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    // tag generations in JavaHeap
    MemTracker::record_virtual_memory_type((address)this_rs.base(), mtJavaHeap);
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
  // tag PermGen
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  clear_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}
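
One detail worth pausing on is the loop near the end: the single reserved range is carved front-to-back with first_part/last_part, each generation taking a prefix and the permanent generation getting the remainder. A minimal standalone sketch with hypothetical types (the real ReservedSpace API takes more parameters):

#include <cstddef>
#include <cstdio>

struct Range {
  char* base;
  size_t size;
  Range first_part(size_t n) const { return { base, n }; }        // prefix
  Range last_part(size_t n) const { return { base + n, size - n }; } // rest
};

int main() {
  static char backing[64];
  Range heap{ backing, sizeof(backing) };
  size_t gen_max[2] = { 16, 32 };  // pretend gen0/gen1 max sizes
  for (size_t i = 0; i < 2; i++) {
    Range g = heap.first_part(gen_max[i]);  // this generation's slice
    std::printf("gen%zu at offset %td, %zu bytes\n",
                i, g.base - backing, g.size);
    heap = heap.last_part(gen_max[i]);      // remainder for the next one
  }
  std::printf("perm at offset %td, %zu bytes\n",
              heap.base - backing, heap.size);
  return 0;
}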

The actual memory reservation is done by the allocate method:

char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();

  assert(alignment % pageSize == 0, "Must be");

  for (int i = 0; i < _n_gens; i++) {
    total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }

  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
  assert(total_reserved % alignment == 0,
         err_msg("Perm size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
                 alignment, perm_gen_spec->max_size()));

  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
  total_reserved = add_and_check_overflow(total_reserved, misc);

  if (UseLargePages) {
    assert(misc == 0, "CDS does not support Large Pages");
    assert(total_reserved != 0, "total_reserved cannot be 0");
    assert(is_size_aligned(total_reserved, os::large_page_size()), "Must be");
    total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.
  char* heap_address;
  if (UseSharedSpaces) {
    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at specified address - the requested memory
        // region is taken already, for example, by 'java' launcher.
        // Try again to reserve heap higher.
        heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}

The reservation itself happens by constructing a ReservedHeapSpace. Note the fallback ladder when compressed oops are enabled: allocate first asks for an address compatible with unscaled narrow oops, then zero-based, and finally heap-based.

ReservedHeapSpace

ReservedHeapSpace inherits from ReservedSpace. ReservedSpace's constructor calls its initialize method, which ultimately calls os::reserve_memory to reserve the memory:

char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  }
  return result;
}

On Linux, pd_reserve_memory looks like this:

char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                            size_t alignment_hint) {
  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
}

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Linux mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
  char * addr;
  int flags;

  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
  if (fixed) {
    assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
    flags |= MAP_FIXED;
  }

  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
  // to PROT_EXEC if executable when we commit the page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    // anon_mmap() should only get called during VM initialization,
    // don't need lock (actually we can skip locking even it can be called
    // from multiple threads, because _highest_vm_reserved_address is just a
    // hint about the upper limit of non-stack memory regions.)
    if ((address)addr + bytes > _highest_vm_reserved_address) {
      _highest_vm_reserved_address = (address)addr + bytes;
    }
  }

  return addr == MAP_FAILED ? NULL : addr;
}

So at the bottom, the heap's address space is in fact reserved through an anonymous mmap.
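
To make that concrete, here is a minimal standalone program (assuming Linux) performing the same style of reservation as anon_mmap: thanks to MAP_NORESERVE, the address range is reserved but no backing storage is committed until the pages are touched.

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t bytes = 64 * 1024 * 1024;  // reserve 64 MB of address space
  void* addr = ::mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) {
    std::perror("mmap");
    return 1;
  }
  std::printf("reserved %zu bytes at %p\n", bytes, addr);
  ::munmap(addr, bytes);  // release the reservation
  return 0;
}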
