linux 多个文件中查找字符串 hadoop 3 安装 调试
http://www.cnblogs.com/iLoveMyD/p/4281534.html
2015年2月9日 14:36:38
# find <directory> -type f -name "*.c" | xargs grep "<strings>"
<directory> 是你要找的文件夹;如果是当前文件夹可以省略
-type f 意思是只找文件
-name "*.c" 表示只找C语言写的代码,从而避免去查binary;也可以不写,表示找所有文件
注意:若文件名含空格,上面的写法会出错,应改用 find <directory> -type f -print0 | xargs -0 grep "<strings>"
<strings> 是你要找的某个字符串

Stopping secondary namenodes [bigdata-server-02]
Last login: Thu Dec 21 17:18:39 CST 2017 on pts/0
ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting.
Stopping nodemanagers
Last login: Thu Dec 21 17:18:42 CST 2017 on pts/0
Stopping resourcemanager
Last login: Thu Dec 21 17:18:46 CST 2017 on pts/0
[root@bigdata-server-02 hadoop]# vim etc/hadoop/hadoop-env.sh [root@bigdata-server-02 hadoop]# find . -type f | xargs grep HADOOP_WORKER
./sbin/workers.sh:# HADOOP_WORKERS File naming remote hosts.
./sbin/workers.sh:# HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands.
grep: ./share/hadoop/yarn/webapps/ui2/assets/images/datatables/Sorting: No such file or directory
grep: icons.psd: No such file or directory
(上面两行报错是因为文件名 "Sorting icons.psd" 中含有空格,被 xargs 拆成了两个参数;用 find … -print0 | xargs -0 grep … 可避免)
./share/doc/hadoop/hadoop-project-dist/hadoop-common/UnixShellAPI.html:<p>Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES} and execute command.</p>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/UnixShellAPI.html:<p>Connect to ${HADOOP_WORKER_NAMES} and execute command under the environment which does not support pdsh.</p>
./bin/hadoop:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./bin/yarn:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./bin/mapred:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./bin/hdfs:if [[ ${HADOOP_WORKER_MODE} = true ]]; then
./etc/hadoop/hadoop-env.sh:#export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
./etc/hadoop/hadoop-user-functions.sh.example:# tmpslvnames=$(echo "${HADOOP_WORKER_NAMES}" | tr ' ' '\n' )
./libexec/hadoop-config.cmd: set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2
./libexec/hadoop-config.sh:hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS
./libexec/hadoop-config.sh:hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES
./libexec/hadoop-config.sh:hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP
./libexec/yarn-config.sh: hadoop_deprecate_envvar YARN_SLAVES HADOOP_WORKERS
./libexec/hadoop-functions.sh: HADOOP_WORKERS="${workersfile}"
./libexec/hadoop-functions.sh: HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
./libexec/hadoop-functions.sh:## @description Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES}
./libexec/hadoop-functions.sh: if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
./libexec/hadoop-functions.sh: hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
./libexec/hadoop-functions.sh: elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
./libexec/hadoop-functions.sh: if [[ -n "${HADOOP_WORKERS}" ]]; then
./libexec/hadoop-functions.sh: worker_file=${HADOOP_WORKERS}
./libexec/hadoop-functions.sh: if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
./libexec/hadoop-functions.sh: tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
./libexec/hadoop-functions.sh: if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
./libexec/hadoop-functions.sh: HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
./libexec/hadoop-functions.sh:## @description Connect to ${HADOOP_WORKER_NAMES} and execute command
./libexec/hadoop-functions.sh: local workers=(${HADOOP_WORKER_NAMES})
./libexec/hadoop-functions.sh: HADOOP_WORKER_NAMES="$1"
./libexec/hadoop-functions.sh: HADOOP_WORKER_MODE=true
[root@bigdata-server-02 hadoop]#
[root@hadoop3 logs]# cat hadoop-root-namenode-hadoop3.log
2017-12-29 15:06:50,183 INFO org.apache.hadoop.http.HttpServer2: addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.namenode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
2017-12-29 15:06:50,190 INFO org.apache.hadoop.http.HttpServer2: HttpServer.start() threw a non Bind IOException
java.net.BindException: Port in use: 0.0.0.0:9870
at org.apache.hadoop.http.HttpServer2.constructBindException(HttpServer2.java:1133)
at org.apache.hadoop.http.HttpServer2.bindForSinglePort(HttpServer2.java:1155)
at org.apache.hadoop.http.HttpServer2.openListeners(HttpServer2.java:1214)
at org.apache.hadoop.http.HttpServer2.start(HttpServer2.java:1069)
at org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.start(NameNodeHttpServer.java:173)
at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:888)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:724)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:950)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:929)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1653)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1720)
Caused by: java.net.BindException: 地址已在使用
at sun.nio.ch.Net.bind0(Native Method)
at sun.nio.ch.Net.bind(Net.java:433)
at sun.nio.ch.Net.bind(Net.java:425)
at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
at org.eclipse.jetty.server.ServerConnector.open(ServerConnector.java:317)
at org.apache.hadoop.http.HttpServer2.bindListener(HttpServer2.java:1120)
at org.apache.hadoop.http.HttpServer2.bindForSinglePort(HttpServer2.java:1151)
... 9 more
2017-12-29 15:06:50,192 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Stopping NameNode metrics system...
但是并没有配置这个端口啊
find检索字符串
[root@hadoop3 hadoop]# find . -type f | xargs grep 9870
grep: ./share/hadoop/yarn/webapps/ui2/assets/images/datatables/Sorting: 没有那个文件或目录
grep: icons.psd: 没有那个文件或目录
./share/doc/hadoop/hadoop-yarn/hadoop-yarn-registry/apidocs/org/apache/hadoop/registry/client/types/AddressTypes.html: ["namenode.example.org", "9870"]
./share/doc/hadoop/api/org/apache/hadoop/registry/client/types/AddressTypes.html: ["namenode.example.org", "9870"]
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml: <value>0.0.0.0:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html:<p>NameNode and DataNode each run an internal web server in order to display basic information about the current status of the cluster. With the default configuration, the NameNode front page is at <tt>http://namenode-name:9870/</tt>. It lists the DataNodes in the cluster and basic statistics of the cluster. The web interface can also be used to browse the file system (using “Browse the file system” link on the NameNode front page).</p></div>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html: <value>machine1.example.com:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html: <value>machine2.example.com:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html: <value>machine3.example.com:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html: <value>machine1.example.com:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html: <value>machine2.example.com:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html: <value>machine3.example.com:9870</value>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/release/3.0.0-alpha1/CHANGES.3.0.0-alpha1.html:<td align="left"> <a class="externalLink" href="https://issues.apache.org/jira/browse/HDFS-9870">HDFS-9870</a> </td>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/release/3.0.0-alpha1/RELEASENOTES.3.0.0-alpha1.html:<p>The patch updates the HDFS default HTTP/RPC ports to non-ephemeral ports. The changes are listed below: Namenode ports: 50470 –> 9871, 50070 –> 9870, 8020 –> 9820 Secondary NN ports: 50091 –> 9869, 50090 –> 9868 Datanode ports: 50020 –> 9867, 50010 –> 9866, 50475 –> 9865, 50075 –> 9864</p><hr />
./share/doc/hadoop/hadoop-project-dist/hadoop-common/release/2.8.0/CHANGES.2.8.0.html:<td align="left"> <a class="externalLink" href="https://issues.apache.org/jira/browse/HDFS-9870">HDFS-9870</a> </td>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/CommandsManual.html:<pre class="source">$ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
./share/doc/hadoop/hadoop-project-dist/hadoop-common/SingleCluster.html:<li>NameNode - <tt>http://localhost:9870/</tt></li>
./share/doc/hadoop/hadoop-project-dist/hadoop-common/ClusterSetup.html:<td align="left"> Default HTTP port is 9870. </td></tr>
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:06:50,085 INFO org.apache.hadoop.hdfs.DFSUtil: Starting Web-server for hdfs at: http://0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:06:50,193 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1: java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:23:48,931 INFO org.apache.hadoop.hdfs.DFSUtil: Starting Web-server for hdfs at: http://0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:java.net.BindException: Port in use: 0.0.0.0:9870
./logs/hadoop-root-namenode-hadoop3.log:2017-12-29 15:23:49,035 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1: java.net.BindException: Port in use: 0.0.0.0:9870
[root@hadoop3 hadoop]# xlc
Stopping namenodes on [hadoop3]
9870 为 HDFS NameNode Web UI(dfs.namenode.http-address)的默认端口
./share/doc/hadoop/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml: <value>0.0.0.0:9870</value>
<property>
<name>dfs.namenode.http-address</name>
<value>0.0.0.0:9870</value>
<description>
The address and the base port where the dfs namenode web ui will listen on.
</description>
</property>
vim 中查找 9870(普通模式下直接输入,不加冒号):
/9870   向下(正向)查找
?9870   向上(反向)查找
linux 多个文件中查找字符串 hadoop 3 安装 调试的更多相关文章
- linux 多个文件中查找字符串
2015年2月9日 14:36:38 # find <directory> -type f -name "*.c" | xargs grep "<str ...
- 在文件夹中 的指定类型文件中 查找字符串(CodeBlocks+GCC编译,控制台程序,仅能在Windows上运行)
说明: 程序使用 io.h 中的 _findfirst 和 _findnext 函数遍历文件夹,故而程序只能在 Windows 下使用. 程序遍历当前文件夹,对其中的文件夹执行递归遍历.同时检查遍历到 ...
- linux上查找文件存放地点和文件中查找字符串方法
一.查找文件存放地点 1.locate 语法:locate <filename> locate命令实际是"find -name"的另一种写法,但是查找方式跟find不同 ...
- 在文件夹下所有文件中查找字符串(linux/windows)
在linux下可以用 grep "String" filename.txt#字符串 文件名grep -r "String" /home/#递归查找目录下所有文件 ...
- Linux平台从文件中查找字符赋值于变量
以telnet方式登录Linux主机,在默认目录下用命令创建一个包含DUT wanIP的文本文件.[root] echo wanIP=88.0.100.253 > ./wanIP.txt在默认目 ...
- linux命令统计文件中某个字符串出现的次数
1.使用grep linux grep命令在我的随笔linux分类里有过简单的介绍,这里就只简单的介绍下使用grep命令统计某个文件这某个字符串出现的次数,首先介绍grep命令的几个参数,详细参数请自 ...
- linux批量修改文件中包含字符串的查找替换
find -name "*.env" | xargs perl -pi -e 's|\babcdefg\b|hahaha|g' .env 文件中abcdef 改为hahaha
- 【Linux】查询文件中指定字符串的记录
语法 cat 文件 |grep 查询字符串 例如现在有文件file.dat,文件中内容如下: zhangsan Lisi wangwu123 wangwu890 zhangsan28290 现在想从文 ...
- linux在所有文件中查找某一个字符
# find <directory> -type f -name "*.c" | xargs grep "<strings>" < ...
随机推荐
- Python通过Openpyxl包汇总表格,效率提升100倍
最近找了份小兼职,干的全是些无聊的工作,比如说给word调整一下排版啦.把从多方回收来的Excel汇总啦,这些极其催眠又耗时的事,怎么能接受手动去做呢!!(疯了嘛,谁知道以后还有多少类似的表格要汇总啊 ...
- python 学习总结1
计算机与程序设计 一.计算机的概念 1.计算机是根据指令操作数据的设备. 2.计算机主要包括两个功能性一个是功能性另一个是计算性 功能性是对数据的操作,表现为数据计算,输入输出处理和结果存储 可编程性 ...
- 【POJ 2891】Strange Way to Express Integers(一元线性同余方程组求解)
Description Elina is reading a book written by Rujia Liu, which introduces a strange way to express ...
- PS学习笔记(03)
ui到底是什么? 很多同学不知道ui是什么,以为画个ICON图标就是做ui了,导致很多人都忙着画各种各样的图标.这样很容易让那些新人们走错路,最后我想说的是icon不是全部,不要沉迷其中,要学的还有很 ...
- 【转】关于大型网站技术演进的思考(二十)--网站静态化处理—web前端优化—中(12)
Web前端很多优化原则都是从如何提升网络通讯效率的角度提出的,但是这些原则使用的时候还是有很多陷阱在里面,如果我们不能深入理解这些优化原则背后所隐藏的技术原理,很有可能掉进这些陷阱里,最终没有达到最佳 ...
- DP在字符匹配上的实现
在此保存下近段时间做的DP在字符匹配上的实现的题目 对于不同的字符串来说,2者只能不断将下标往后推移来实现匹配从而得到的最大匹配数 如 abcd 和 dcba 这个最大匹配数只能为1,因为两个d匹配后 ...
- [luoguP1773] 符文之语_NOI导刊2010提高(02)(DP)
传送门 f[i][j]表示前i个数余数为j的最优解 sum[i][j]表示字符串i~j所构成的数 #include <cstdio> #include <cstring> #d ...
- 第k小整数(树状数组)
洛谷传送门 入门难度.. 没错,但是我并不是要暴力做. 而是用树状数组来做. 先离散化,然后随便搞一搞就可以了.(晕.比暴力还慢) 如果要查找某一区间的的话可以把区间取出重新建树,然后再求.(更暴力) ...
- lubuntu通过Smb访问Windows共享目录
lubuntu通过Smb访问Windows共享目录 如果未安装Smb,先安装: apt-get install smbclient smbfs 安装后,查看共享主机上的共享目录: CentOS/Red ...
- JavaBean映射工具dozer学习
阅读更多 转载自http://lishaorui.iteye.com/blog/1151513 1.简介 dozer是一种JavaBean的映射工具,类似于apache的BeanUtils.但 ...