Cross-compiling Qt 4.8.3 with arm-linux-gcc v4.3.4
1. Extract the source
#tar zxvf qt-everywhere-opensource-src-4.8.3.tar.gz
2. Configure
#mkdir buildarm-static
#cd buildarm-static
#../qt-everywhere-opensource-src-4.8.3/configure \
-opensource -confirm-license \
-release \
-prefix /usr/qt-4.83-arm-static \
-embedded arm -little-endian -static \
-qt-mouse-tslib -L $PWD/lib -I $PWD/include/tslib \
-no-largefile \
-no-accessibility \
-no-qt3support \
-no-xmlpatterns \
-no-multimedia -no-audio-backend \
-no-phonon -no-phonon-backend \
-no-svg \
-no-webkit \
-no-javascript-jit \
-no-script -no-scripttools \
-no-declarative \
-no-declarative-debug \
-platform qws/linux-x86-g++ \
-no-mmx -no-3dnow \
-no-sse -no-sse2 -no-sse3 -no-ssse3 -no-sse4.1 -no-sse4.2 \
-no-avx -no-neon \
-qt-zlib \
-no-gif -no-libtiff \
-qt-libpng \
-no-libmng \
-qt-libjpeg \
-qt-freetype -no-openssl \
-nomake tools -nomake examples -nomake demos -nomake docs -nomake translations \
-no-nis -no-cups -no-iconv -no-pch -no-dbus \
-reduce-relocations \
-no-gtkstyle -no-nas-sound -no-opengl -no-openvg -no-sm \
-no-xshape -no-xvideo -no-xsync -no-xinerama -no-xcursor \
-no-xfixes -no-xrandr -no-xrender -no-mitshm -no-fontconfig \
-no-xinput -no-xkb -no-glib \
2>&1 | tee ../qteconfigarm-static.log
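Two things are assumed by this configure line and worth checking first. -embedded arm makes configure cross-compile with the mkspec in mkspecs/qws/linux-arm-g++, so that spec's qmake.conf must name the v4.3.4 toolchain and the toolchain must be on PATH. A minimal sketch of the relevant entries (arm-linux- is the stock prefix in that spec; adjust it if your cross tools are named differently):

# mkspecs/qws/linux-arm-g++/qmake.conf (excerpt)
QMAKE_CC         = arm-linux-gcc
QMAKE_CXX        = arm-linux-g++
QMAKE_LINK       = arm-linux-g++
QMAKE_LINK_SHLIB = arm-linux-g++
QMAKE_AR         = arm-linux-ar cqs
QMAKE_OBJCOPY    = arm-linux-objcopy
QMAKE_STRIP      = arm-linux-strip

Also, -qt-mouse-tslib together with -L $PWD/lib -I $PWD/include/tslib points at the build directory itself, so a cross-compiled tslib must already sit in buildarm-static/lib and buildarm-static/include/tslib. A sketch of producing it, assuming a tslib source checkout and the same toolchain (the ac_cv_func_malloc_0_nonnull cache variable works around autoconf's malloc check, which cannot run test programs when cross-compiling):

#cd tslib
#./autogen.sh
#./configure --host=arm-linux --prefix=/path/to/buildarm-static ac_cv_func_malloc_0_nonnull=yes
#make
#make install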
3. Compile and install
#gmake 2>&1 | tee ../qtemake-static.log
#gmake install
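Once gmake install succeeds (see the errors below if it does not), applications are built against /usr/qt-4.83-arm-static and link Qt statically, but tslib is still configured on the target through environment variables at run time. A typical target-side setup — every device and file path here is an assumption for a particular board, and ts_calibrate should be run once beforehand to produce the calibration file:

#export TSLIB_TSDEVICE=/dev/input/event0
#export TSLIB_CONFFILE=/etc/ts.conf
#export TSLIB_CALIBFILE=/etc/pointercal
#export TSLIB_PLUGINDIR=/usr/lib/ts
#export QWS_MOUSE_PROTO=tslib:/dev/input/event0
#./myapp -qws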
4. Error 1
/qt-everywhere-opensource-src-4.8.5/src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp:662: error: 'pthread_getattr_np' was not declared in this scope
Fix: patch src/3rdparty/javascriptcore/JavaScriptCore/runtime/Collector.cpp. (The log above happens to show a 4.8.5 tree; the file and the fix are the same in 4.8.3.) Toolchains built on uClibc 0.9.32 or older with linuxthreads.old lack pthread_getattr_np() and pthread_attr_getstack(), which JavaScriptCore's garbage collector normally uses to find the current thread's stack bounds. The upstream WebKit fix adds a fallback that finds the stack by scanning /proc/self/maps for the mapping that contains __libc_stack_end. Only two places in the file change; the rest stays as shipped.

First, in the OS(UNIX) include section near the top of the file, add a uClibc detection block right after the existing QT_LINUXBASE block:

#if defined(__UCLIBC__)
// versions of uClibc 0.9.32 and below with linuxthreads.old do not have
// pthread_getattr_np or pthread_attr_getstack.
#if __UCLIBC_MAJOR__ == 0 && \
    (__UCLIBC_MINOR__ < 9 || \
    (__UCLIBC_MINOR__ == 9 && __UCLIBC_SUBLEVEL__ <= 32)) && \
    defined(__LINUXTHREADS_OLD__)
#define UCLIBC_USE_PROC_SELF_MAPS 1
#include <stdio_ext.h>
extern int *__libc_stack_end;
#endif
#endif

Second, in the static function currentThreadStackBase(), put the fallback at the head of the OS(UNIX) branch; the original pthread_getattr_np() lookup becomes the #else side and is otherwise unchanged:

#elif OS(UNIX)
#ifdef UCLIBC_USE_PROC_SELF_MAPS
    // Read /proc/self/maps and locate the line whose address
    // range contains __libc_stack_end.
    FILE* file = fopen("/proc/self/maps", "r");
    if (!file)
        return 0;
    __fsetlocking(file, FSETLOCKING_BYCALLER);
    char* line = NULL;
    size_t lineLen = 0;
    while (!feof_unlocked(file)) {
        if (getdelim(&line, &lineLen, '\n', file) <= 0)
            break;

        long from;
        long to;
        if (sscanf(line, "%lx-%lx", &from, &to) != 2)
            continue;
        if (from <= (long)__libc_stack_end && (long)__libc_stack_end < to) {
            fclose(file);
            free(line);
#ifdef _STACK_GROWS_UP
            return (void *)from;
#else
            return (void *)to;
#endif
        }
    }
    fclose(file);
    free(line);
    return 0;
#else
    // existing code, unchanged: query the stack base and size once per
    // thread via pthread_getattr_np()/pthread_attr_getstack()
    ...
#endif
Error 2

Modify line 117 of qt-everywhere-opensource-src-4.8.3/src/corelib/thread/qthread_unix.cpp, changing

#if defined(Q_OS_LINUX) && defined(__GLIBC__) && (defined(Q_CC_GNU) || defined(Q_CC_INTEL))

to

#if defined(Q_OS_LINUX) && defined(__GLIBC__) && ((defined(Q_CC_GNU) && defined(_GLIBCXX_HAVE_TLS)) || defined(Q_CC_INTEL))
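Background: this line guards a __thread-based (compiler TLS) cache of the current thread's QThreadData. uClibc-based toolchains define __GLIBC__ for compatibility, yet their C++ runtime may have been built without TLS support, so the unpatched condition enables a code path the toolchain cannot build; _GLIBCXX_HAVE_TLS is only defined by libstdc++ when it was configured with TLS. A quick way to probe the toolchain — a sketch, assuming the cross compiler is invoked as arm-linux-gcc:

#echo 'static __thread int x; int main(void) { x = 1; return x; }' > tls-test.c
#arm-linux-gcc -o tls-test tls-test.c

If this fails to compile or link, the toolchain has no working __thread support and the patched guard correctly keeps Qt on its pthread_getspecific()-based fallback.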