MongoDB monitoring scripts
mongodb_server.py
#! /bin/env python
# -*- coding:utf8 -*-
import sys
import os
from bson.timestamp import Timestamp
import pymongo
from pymongo import MongoClient


class mongodbMonitor(object):

    def mongodb_connect(self, host=None, port=None, user=None, password=None):
        try:
            # server selection timeout in ms; the original value was lost, 1000 is assumed here
            conn = MongoClient(host, port, serverSelectionTimeoutMS=1000)
            if user and password:
                db_admin = conn["admin"]
                if not db_admin.authenticate(user, password):
                    pass
            conn.server_info()  # ping the server; raises if it is unreachable
        except:
            e = sys.exc_info()[0]
            return e, None
        return 0, conn

    # node roles -- data node (1): standalone, replset primary, replset secondary; mongos (2); config server (3)
    def get_mongo_role(self, conn):
        mongo_role = 1  # default: data node
        conn.server_info()
        if conn.is_mongos:
            mongo_role = 2
        elif "chunks" in conn.get_database("config").collection_names():
            # not a mongos, but has a config.chunks collection: it is a config server
            mongo_role = 3
        return mongo_role

    def get_mongo_monitor_data(self, conn):
        mongo_monitor_dict = {}
mongo_monitor_dict["mongo_local_alive"] = # mongo local alive metric for all nodes.
mongo_role = self.get_mongo_role(conn) if(mongo_role == ):
mongodb_role,serverStatus_dict = self.serverStatus(conn)
mongo_monitor_dict.update(serverStatus_dict)
repl_status_dict = {}
if (mongodb_role == "master" or mongodb_role == "secondary"):
repl_status_dict = self.repl_status(conn)
mongo_monitor_dict.update(repl_status_dict)
else:
print "this is standalone node"
elif(mongo_role == ): # mongos
shards_dict = self.shard_status(conn)
mongo_monitor_dict.update(shards_dict)
return mongo_monitor_dict def serverStatus(self,connection): serverStatus = connection.admin.command(pymongo.son_manipulator.SON([('serverStatus', )])) mongodb_server_dict = {} # mongodb server status metric for upload to falcon mongo_version = serverStatus["version"]
        # uptime metric
        mongodb_server_dict["uptime"] = int(serverStatus["uptime"])
        # "asserts" section metrics
        mongo_asserts = serverStatus["asserts"]
        for asserts_key in mongo_asserts.keys():
            asserts_key_name = "asserts_" + asserts_key
            mongodb_server_dict[asserts_key_name] = mongo_asserts[asserts_key]
        # "extra_info" section metrics: page_faults. falcon COUNTER type.
        if serverStatus.has_key("extra_info"):
            mongodb_server_dict["page_faults"] = serverStatus["extra_info"]["page_faults"]
        # "connections" section metrics
        current_conn = serverStatus["connections"]["current"]
        available_conn = serverStatus["connections"]["available"]
        mongodb_server_dict["connections_current"] = current_conn
        mongodb_server_dict["connections_available"] = available_conn
        # connections used percent; multiply by 100.0 first to avoid integer division
        mongodb_server_dict["connections_used_percent"] = int(current_conn * 100.0 / (current_conn + available_conn))
        # total created since mongod started. COUNTER metric
        mongodb_server_dict["connections_totalCreated"] = serverStatus["connections"]["totalCreated"]
        # "globalLock" currentQueue
        mongodb_server_dict["globalLock_currentQueue_total"] = serverStatus["globalLock"]["currentQueue"]["total"]
        mongodb_server_dict["globalLock_currentQueue_readers"] = serverStatus["globalLock"]["currentQueue"]["readers"]
        mongodb_server_dict["globalLock_currentQueue_writers"] = serverStatus["globalLock"]["currentQueue"]["writers"]
if serverStatus.has_key("locks") and mongo_version >"3.0":
locks_dict_keys = serverStatus["locks"].keys()
for lock_scope in locks_dict_keys: # Global, Database,Collection,Oplog
for lock_metric in serverStatus["locks"][lock_scope]:
for lock_type in serverStatus["locks"][lock_scope][lock_metric]: if lock_type == "R":
lock_name = "Slock"
elif lock_type == "W":
lock_name = "Xlock"
elif lock_type == "r":
lock_name = "ISlock"
elif lock_type == "w":
lock_name = "IXlock"
lock_metric_key = "locks_" + lock_scope + "_" + lock_metric + "_" + lock_name
mongodb_server_dict[lock_metric_key] = serverStatus["locks"][lock_scope][lock_metric][lock_type] # "network" section metrics: bytesIn, bytesOut, numRequests; counter type
if serverStatus.has_key("network"):
for network_metric in serverStatus["network"].keys():
network_metric_key = "network_" + network_metric # network metric key for upload
mongodb_server_dict[network_metric_key] = serverStatus["network"][network_metric] ### "opcounters" section metrics: insert, query, update, delete, getmore, command. couter type
if serverStatus.has_key("opcounters"):
for opcounters_metric in serverStatus["opcounters"].keys():
opcounters_metric_key = "opcounters_" + opcounters_metric
mongodb_server_dict[opcounters_metric_key] = serverStatus["opcounters"][opcounters_metric] ### "opcountersRepl" section metrics: insert, query, update, delete, getmore, command. couter type
if serverStatus.has_key("opcountersRepl"):
for opcountersRepl_metric in serverStatus["opcountersRepl"].keys():
opcountersRepl_metric_key = "opcountersRepl_" + opcountersRepl_metric
mongodb_server_dict[opcountersRepl_metric_key] = serverStatus["opcounters"][opcountersRepl_metric] ### "mem" section metrics:
if serverStatus.has_key("mem"):
for mem_metric in serverStatus["mem"].keys():
mem_metric_key = "mem_" + mem_metric
if( mem_metric in ["bits","supported"] ):
mongodb_server_dict[mem_metric_key] = serverStatus["mem"][mem_metric]
else:
mongodb_server_dict[mem_metric_key] = serverStatus["mem"][mem_metric]** ### "dur" section metrics:
if serverStatus.has_key("dur"):
mongodb_server_dict["dur_journaledBytes"] = serverStatus["dur"]["journaledMB"]**
mongodb_server_dict["dur_writeToDataFilesBytes"] = serverStatus["dur"]["writeToDataFilesMB"]**
mongodb_server_dict["dur_commitsInWriteLock"] = serverStatus["dur"]["commitsInWriteLock"] ### "repl" section
mongodb_role = ""
if (serverStatus.has_key("repl") and serverStatus["repl"].has_key("secondary")):
if serverStatus["repl"]["ismaster"]:
mongodb_role = "master"
if serverStatus["repl"]["secondary"]:
mongodb_role = "secondary"
else: # not Replica sets mode
mongodb_role = "standalone" ### "backgroundFlushing" section metrics, only for MMAPv1
if serverStatus.has_key("backgroundFlushing"):
for bgFlush_metric in serverStatus["backgroundFlushing"].keys():
if bgFlush_metric != "last_finished": # discard last_finished metric
bgFlush_metric_key = "backgroundFlushing_" + bgFlush_metric
mongodb_server_dict[bgFlush_metric_key] = serverStatus["backgroundFlushing"][bgFlush_metric] ### cursor from "metrics" section
if serverStatus.has_key("metrics") and serverStatus["metrics"].has_key("cursor"):
cursor_status = serverStatus["metrics"]["cursor"]
mongodb_server_dict["cursor_timedOut"] = cursor_status["timedOut"]
mongodb_server_dict["cursor_open_noTimeout"] = cursor_status["open"]["noTimeout"]
mongodb_server_dict["cursor_open_pinned"] = cursor_status["open"]["pinned"]
mongodb_server_dict["cursor_open_total"] = cursor_status["open"]["total"] ### "wiredTiger" section
if serverStatus.has_key("wiredTiger"):
serverStatus_wt = serverStatus["wiredTiger"] #cache
wt_cache = serverStatus_wt["cache"]
mongodb_server_dict["wt_cache_used_total_bytes"] = wt_cache["bytes currently in the cache"]
mongodb_server_dict["wt_cache_dirty_bytes"] = wt_cache["tracked dirty bytes in the cache"]
mongodb_server_dict["wt_cache_readinto_bytes"] = wt_cache["bytes read into cache"]
mongodb_server_dict["wt_cache_writtenfrom_bytes"] = wt_cache["bytes written from cache"] #concurrentTransactions
wt_concurrentTransactions = serverStatus_wt["concurrentTransactions"]
mongodb_server_dict["wt_concurrentTransactions_write"] = wt_concurrentTransactions["write"]["available"]
mongodb_server_dict["wt_concurrentTransactions_read"] = wt_concurrentTransactions["read"]["available"] #"block-manager" section
wt_block_manager = serverStatus_wt["block-manager"]
mongodb_server_dict["wt_bm_bytes_read"] = wt_block_manager["bytes read"]
mongodb_server_dict["wt_bm_bytes_written"] = wt_block_manager["bytes written"]
mongodb_server_dict["wt_bm_blocks_read"] = wt_block_manager["blocks read" ]
mongodb_server_dict["wt_bm_blocks_written"] = wt_block_manager["blocks written"] ### "rocksdb" engine
if serverStatus.has_key("rocksdb"):
serverStatus_rocksdb = serverStatus["rocksdb"] mongodb_server_dict["rocksdb_num_immutable_mem_table"] = serverStatus_rocksdb["num-immutable-mem-table"]
mongodb_server_dict["rocksdb_mem_table_flush_pending"] = serverStatus_rocksdb["mem-table-flush-pending"]
mongodb_server_dict["rocksdb_compaction_pending"] = serverStatus_rocksdb["compaction-pending"]
mongodb_server_dict["rocksdb_background_errors"] = serverStatus_rocksdb["background-errors"]
mongodb_server_dict["rocksdb_num_entries_active_mem_table"] = serverStatus_rocksdb["num-entries-active-mem-table"]
mongodb_server_dict["rocksdb_num_entries_imm_mem_tables"] = serverStatus_rocksdb["num-entries-imm-mem-tables"]
mongodb_server_dict["rocksdb_num_snapshots"] = serverStatus_rocksdb["num-snapshots"]
mongodb_server_dict["rocksdb_oldest_snapshot_time"] = serverStatus_rocksdb["oldest-snapshot-time"]
mongodb_server_dict["rocksdb_num_live_versions"] = serverStatus_rocksdb["num-live-versions"]
mongodb_server_dict["rocksdb_total_live_recovery_units"] = serverStatus_rocksdb["total-live-recovery-units"] ### "PerconaFT" engine
if serverStatus.has_key("PerconaFT"):
serverStatus_PerconaFT = serverStatus["PerconaFT"] mongodb_server_dict["PerconaFT_log_count"] = serverStatus_PerconaFT["log"]["count"]
mongodb_server_dict["PerconaFT_log_time"] = serverStatus_PerconaFT["log"]["time"]
mongodb_server_dict["PerconaFT_log_bytes"] = serverStatus_PerconaFT["log"]["bytes"] mongodb_server_dict["PerconaFT_fsync_count"] = serverStatus_PerconaFT["fsync"]["count"]
mongodb_server_dict["PerconaFT_fsync_time"] = serverStatus_PerconaFT["fsync"]["time"] ### cachetable
PerconaFT_cachetable = serverStatus_PerconaFT["cachetable"]
mongodb_server_dict["PerconaFT_cachetable_size_current"] = PerconaFT_cachetable["size"]["current"]
mongodb_server_dict["PerconaFT_cachetable_size_writing"] = PerconaFT_cachetable["size"]["writing"]
mongodb_server_dict["PerconaFT_cachetable_size_limit"] = PerconaFT_cachetable["size"]["limit"] ### PerconaFT checkpoint
PerconaFT_checkpoint = serverStatus_PerconaFT["checkpoint"]
mongodb_server_dict["PerconaFT_checkpoint_count"] = PerconaFT_checkpoint["count"]
mongodb_server_dict["PerconaFT_checkpoint_time"] = PerconaFT_checkpoint["time"] mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_count"] = PerconaFT_checkpoint["write"]["nonleaf"]["count"]
mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_time"] = PerconaFT_checkpoint["write"]["nonleaf"]["time"]
mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_bytes_compressed"] = PerconaFT_checkpoint["write"]["nonleaf"]["bytes"]["compressed"]
mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_bytes_uncompressed"] = PerconaFT_checkpoint["write"]["nonleaf"]["bytes"]["uncompressed"]
mongodb_server_dict["PerconaFT_checkpoint_write_leaf_count"] = PerconaFT_checkpoint["write"]["leaf"]["count"]
mongodb_server_dict["PerconaFT_checkpoint_write_leaf_time"] = PerconaFT_checkpoint["write"]["leaf"]["time"]
mongodb_server_dict["PerconaFT_checkpoint_write_leaf_bytes_compressed"] = PerconaFT_checkpoint["write"]["leaf"]["bytes"]["compressed"]
mongodb_server_dict["PerconaFT_checkpoint_write_leaf_bytes_uncompressed"] = PerconaFT_checkpoint["write"]["leaf"]["bytes"]["uncompressed"] ### serializeTime for serializeTime_item in serverStatus_PerconaFT["serializeTime"]:
prefix = "PerconaFT_serializeTime_" + serializeTime_item
for serializeTime_key in serverStatus_PerconaFT["serializeTime"][serializeTime_item]:
key_name = prefix + "_" + serializeTime_key
mongodb_server_dict[key_name] = serverStatus_PerconaFT["serializeTime"][serializeTime_item][serializeTime_key] ### PerconaFT compressionRatio
for compressionRatio_item in serverStatus_PerconaFT["compressionRatio"]:
key_name = "PerconaFT_compressionRatio_" + compressionRatio_item
mongodb_server_dict[key_name] = serverStatus_PerconaFT["compressionRatio"][compressionRatio_item] return (mongodb_role, mongodb_server_dict) def repl_status(self,connection):
        replStatus = connection.admin.command("replSetGetStatus")
        repl_status_dict = {}  # replica set metric dict
        # myState: 1 means PRIMARY, 2 means SECONDARY; other values are other member states
        repl_status_dict["repl_myState"] = replStatus["myState"]
        repl_status_members = replStatus["members"]
        master_optime = 0  # PRIMARY oplog ops time
        myself_optime = 0  # SECONDARY oplog ops time
        # print repl_status_members  # debug
        for repl_member in repl_status_members:
            if repl_member.has_key("self") and repl_member["self"]:
                repl_status_dict["repl_health"] = repl_member["health"]
                # "optime" is a document here; the Timestamp lives in its "ts" field
                repl_status_dict["repl_optime"] = repl_member["optime"]["ts"].time
                if repl_member.has_key("electionTime"):
                    repl_status_dict["repl_electionTime"] = repl_member["electionTime"].time
                if repl_member.has_key("configVersion"):
                    repl_status_dict["repl_configVersion"] = repl_member["configVersion"]
                myself_optime = repl_member["optime"]["ts"].time
            if replStatus["myState"] == 2 and repl_member["state"] == 1:  # I am a SECONDARY; this member is the PRIMARY
                master_optime = repl_member["optime"]["ts"].time
        if replStatus["myState"] == 2:  # on a SECONDARY, report replication lag
            repl_status_dict["repl_lag"] = master_optime - myself_optime
        # oplog window in hours
        oplog_collection = connection["local"]["oplog.rs"]
        oplog_tFirst = oplog_collection.find({}, {"ts": 1}).sort('$natural', pymongo.ASCENDING).limit(1).next()
        oplog_tLast = oplog_collection.find({}, {"ts": 1}).sort('$natural', pymongo.DESCENDING).limit(1).next()
        oplogrs_collstats = connection["local"].command("collstats", "oplog.rs")
        window_multiple = 1  # stays 1 once the oplog.rs collection is full
        if oplogrs_collstats.has_key("maxSize"):
            window_multiple = oplogrs_collstats["maxSize"] / (oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
        else:
            window_multiple = oplogrs_collstats["storageSize"] / (oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
        # oplog window, in x.xx hours
        oplog_window = round((oplog_tLast["ts"].time - oplog_tFirst["ts"].time) / 3600.0, 2) * window_multiple
        repl_status_dict["repl_oplog_window"] = oplog_window
        return repl_status_dict
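    # A worked example of the window math above, with made-up numbers: if the first and last
    # oplog entries are 7200 seconds apart, the raw window is round(7200/3600.0, 2) = 2.0 hours.
    # If the oplog has not wrapped yet and maxSize is twice count*avgObjSize, the projected
    # window is 2.0 * 2 = 4.0 hours; once the oplog is full, window_multiple is close to 1.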
    # only for mongos nodes
    def shard_status(self, conn):
        config_db = conn["config"]
        settings_col = config_db["settings"]
        balancer_doc = settings_col.find_one({'_id': 'balancer'})
        shards_dict = {}
        # shards_BalancerState metric: 1 = enabled (the default), 0 = stopped
        if balancer_doc is None:
            shards_dict["shards_BalancerState"] = 1
        elif balancer_doc["stopped"]:
            shards_dict["shards_BalancerState"] = 0
        else:
            shards_dict["shards_BalancerState"] = 1
        # shards_activeWindow metric: 0 = no window configured, 1 = window configured
        # shards_activeWindow_start / _stop metrics: { "start" : "23:30", "stop" : "6:00" }
        # is reported as 23.30 / 6.00, since falcon values must be numeric
        if balancer_doc is None:
            shards_dict["shards_activeWindow"] = 0
        elif balancer_doc.has_key("activeWindow"):
            shards_dict["shards_activeWindow"] = 1
            if balancer_doc["activeWindow"].has_key("start"):
                window_start = balancer_doc["activeWindow"]["start"]
                shards_dict["shards_activeWindow_start"] = window_start.replace(":", ".")
            if balancer_doc["activeWindow"].has_key("stop"):
                window_stop = balancer_doc["activeWindow"]["stop"]
                shards_dict["shards_activeWindow_stop"] = window_stop.replace(":", ".")
        # shards_chunkSize metric
        chunksize_doc = settings_col.find_one({"_id": "chunksize"})
        if chunksize_doc is not None:
            shards_dict["shards_chunkSize"] = chunksize_doc["value"]
        # shards_isBalancerRunning metric
        locks_col = config_db["locks"]
        balancer_lock_doc = locks_col.find_one({'_id': 'balancer'})
        if balancer_lock_doc is None:
            print "config.locks collection empty or missing. be sure you are connected to a mongos"
            shards_dict["shards_isBalancerRunning"] = 0
        elif balancer_lock_doc["state"] > 0:
            shards_dict["shards_isBalancerRunning"] = 1
        else:
            shards_dict["shards_isBalancerRunning"] = 0
        # shards_size metric
        shards_col = config_db["shards"]
        shards_dict["shards_size"] = shards_col.count()
        # shards_mongosSize metric
        mongos_col = config_db["mongos"]
        shards_dict["shards_mongosSize"] = mongos_col.count()
        return shards_dict
mongodb_monitor.py
#! /bin/env python
# -*- coding:utf8 -*-
import sys
import os
import time
import datetime
import socket
import yaml
import requests
import json

from mongodb_server import mongodbMonitor

falcon_client = "http://127.0.0.1:1988/v1/push"  # local falcon agent push API
ts = int(time.time())

# all falcon COUNTER-type metrics; everything else is reported as GAUGE
mongodb_counter_metric = ["uptime",
                          "asserts_msg",
"asserts_regular",
"asserts_rollovers",
"asserts_user",
"asserts_warning",
"page_faults",
"connections_totalCreated",
"locks_Global_acquireCount_ISlock",
"locks_Global_acquireCount_IXlock",
"locks_Global_acquireCount_Slock",
"locks_Global_acquireCount_Xlock",
"locks_Global_acquireWaitCount_ISlock",
"locks_Global_acquireWaitCount_IXlock",
"locks_Global_timeAcquiringMicros_ISlock",
"locks_Global_timeAcquiringMicros_IXlock",
"locks_Database_acquireCount_ISlock",
"locks_Database_acquireCount_IXlock",
"locks_Database_acquireCount_Slock",
"locks_Database_acquireCount_Xlock",
"locks_Collection_acquireCount_ISlock",
"locks_Collection_acquireCount_IXlock",
"locks_Collection_acquireCount_Xlock",
"opcounters_command",
"opcounters_insert",
"opcounters_delete",
"opcounters_update",
"opcounters_query",
"opcounters_getmore",
"opcountersRepl_command",
"opcountersRepl_insert",
"opcountersRepl_delete",
"opcountersRepl_update",
"opcountersRepl_query",
"opcountersRepl_getmore",
"network_bytesIn",
"network_bytesOut",
"network_numRequests",
"backgroundFlushing_flushes",
"backgroundFlushing_last_ms",
"cursor_timedOut",
"wt_cache_readinto_bytes",
"wt_cache_writtenfrom_bytes",
"wt_bm_bytes_read",
"wt_bm_bytes_written",
"wt_bm_blocks_read",
"wt_bm_blocks_written"
                          ]

# read the endpoint hostname from the falcon agent config
with open('../../cfg.json') as f:
    data = f.read().replace('\n', '')
jsonlist = json.loads(data)
mongodb_hostname = jsonlist['hostname']

# read the mongodb instance list (port/user/password) from the yaml config
f = open("../conf/mongomon.conf")
y = yaml.load(f)
f.close()
mongodb_items = y["items"]

for mongodb_ins in mongodb_items:
    mongodb_monitor = mongodbMonitor()
    mongodb_tag = "mongo=" + str(mongodb_ins["port"])
    err, conn = mongodb_monitor.mongodb_connect(host="127.0.0.1", port=mongodb_ins["port"], user=mongodb_ins["user"], password=mongodb_ins["password"])
    mongodb_update_list = []
    if err != 0:
        # the instance is dead: upload "mongo_local_alive=0", then move on to the next one.
        # "step" is the falcon reporting interval in seconds; the original value was lost, 60 is assumed
        key_item_dict = {"endpoint": mongodb_hostname, "metric": "mongo_local_alive", "tags": mongodb_tag, "timestamp": ts, "value": 0, "step": 60, "counterType": "GAUGE"}
        mongodb_update_list.append(key_item_dict)
        r = requests.post(falcon_client, data=json.dumps(mongodb_update_list))
        continue
    mongodb_dict = mongodb_monitor.get_mongo_monitor_data(conn)
    mongodb_dict_keys = mongodb_dict.keys()
    for mongodb_metric in mongodb_dict_keys:
        if mongodb_metric in mongodb_counter_metric:
            key_item_dict = {"endpoint": mongodb_hostname, "metric": mongodb_metric, "tags": mongodb_tag, "timestamp": ts, "value": mongodb_dict[mongodb_metric], "step": 60, "counterType": "COUNTER"}
        else:
            key_item_dict = {"endpoint": mongodb_hostname, "metric": mongodb_metric, "tags": mongodb_tag, "timestamp": ts, "value": mongodb_dict[mongodb_metric], "step": 60, "counterType": "GAUGE"}
        mongodb_update_list.append(key_item_dict)
    print "pushing to falcon agent:"
    print json.dumps(mongodb_update_list)
    r = requests.post(falcon_client, data=json.dumps(mongodb_update_list))
    print r
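For reference, the two config files read above might look like this (the field names come from the code; all values are made up):

../conf/mongomon.conf -- one entry per local mongod/mongos instance:

items:
  - port: 27017
    user: "falcon_monitor"
    password: "falcon_pass"

../../cfg.json -- only the hostname field is used, as the falcon endpoint:

{"hostname": "mongo-db01"}

Each element of mongodb_update_list pushed to the local falcon agent then has this shape (again with made-up values):

{"endpoint": "mongo-db01", "metric": "connections_current", "tags": "mongo=27017", "timestamp": 1462345678, "value": 125, "step": 60, "counterType": "GAUGE"}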