The command used to start the console producer client:

/opt/cloudera/parcels/KAFKA-4.0.-1.4.0.0.p0./bin/kafka-console-producer --broker-list hadoop102:9092 --topic topic_start
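For completeness, the matching console consumer can be started from the same directory. A sketch assuming the same parcel layout and topic (the glob stands in for the exact parcel version; --from-beginning is optional and simply replays the topic from the start):

/opt/cloudera/parcels/KAFKA-*/bin/kafka-console-consumer --bootstrap-server hadoop102:9092 --topic topic_start --from-beginning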

Looking in that bin directory, kafka-console-producer turns out not to be the real tool but a small wrapper script:

#!/bin/bash
# Reference: http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in
SOURCE="${BASH_SOURCE[0]}"
BIN_DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$BIN_DIR/$SOURCE"
  BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
LIB_DIR=$BIN_DIR/../lib

# Autodetect JAVA_HOME if not defined
if [ -e $LIB_DIR/../../CDH/lib/bigtop-utils/bigtop-detect-javahome ] ; then
  . $LIB_DIR/../../CDH/lib/bigtop-utils/bigtop-detect-javahome
fi

exec $LIB_DIR/kafka/bin/kafka-console-producer.sh "$@"

${BASH_SOURCE[0]} (equivalent to $BASH_SOURCE) expands to the path and file name of the shell script that is currently executing. Unlike $0, it still points at the script file itself when the script is sourced rather than executed directly. For example, after running

bash modules/tools/planning_traj_plot/run.sh

${BASH_SOURCE[0]} is modules/tools/planning_traj_plot/run.sh.

dirname - strips the final file-name component from a path, leaving only the directory part; e.g. the directory of modules/tools/planning_traj_plot/run.sh is modules/tools/planning_traj_plot.
$() - equivalent to `command` backquotes: command substitution, i.e. it captures the output of the command.
&& - logical AND; the command on the right runs only if the command on the left succeeds.

cd -P switches into the real physical directory, resolving symbolic links instead of staying on the symlinked path.

Putting this together, BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" is the absolute physical directory containing the current script,
and LIB_DIR=$BIN_DIR/../lib is the lib directory one level above it.
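To see the resolution in action, here is a minimal self-contained sketch using the same technique as the wrapper (all paths under /tmp/demo are hypothetical and created just for the demo):

#!/bin/bash
# Demo of the symlink-resolution loop used by the CDH wrapper.
mkdir -p /tmp/demo/real /tmp/demo/linkdir
cat > /tmp/demo/real/tool.sh <<'EOF'
#!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
BIN_DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]; do
  SOURCE="$(readlink "$SOURCE")"
  [[ $SOURCE != /* ]] && SOURCE="$BIN_DIR/$SOURCE"
  BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
BIN_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
echo "resolved BIN_DIR: $BIN_DIR"
EOF
chmod +x /tmp/demo/real/tool.sh
ln -sf /tmp/demo/real/tool.sh /tmp/demo/linkdir/tool.sh
/tmp/demo/linkdir/tool.sh   # prints /tmp/demo/real, not /tmp/demo/linkdir

Whether the tool is invoked through the symlink or directly, the loop ends with BIN_DIR pointing at the real directory, which is exactly what lets the parcel wrapper locate ../lib reliably.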
For reference, the bash test operators used in scripts like these (a short demo follows this list):
-e filename - true if filename exists
-d filename - true if filename is a directory
-f filename - true if filename is a regular file
-L filename - true if filename is a symbolic link
-r filename - true if filename is readable
-w filename - true if filename is writable
-x filename - true if filename is executable
-s filename - true if filename has a size greater than zero
-h filename - true if filename is a symbolic link (same as -L)
filename1 -nt filename2 - true if filename1 is newer than filename2
filename1 -ot filename2 - true if filename1 is older than filename2
-eq - equal (integer comparison)
-ne - not equal
-gt - greater than
-ge - greater than or equal
-lt - less than
-le - less than or equal
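A quick sketch exercising a few of these operators (the file names are hypothetical):

touch /tmp/a; sleep 1; touch /tmp/b
[ -e /tmp/a ] && echo "/tmp/a exists"
[ -f /tmp/a ] && echo "/tmp/a is a regular file"
ln -sf /tmp/a /tmp/a.link
[ -h /tmp/a.link ] && echo "/tmp/a.link is a symlink"
[ /tmp/b -nt /tmp/a ] && echo "/tmp/b is newer than /tmp/a"
n=3
[ "$n" -ge 1 ] && echo "n >= 1"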

The wrapper execs $LIB_DIR/kafka/bin/kafka-console-producer.sh, so under lib/kafka/bin we find the real script, kafka-console-producer.sh:

#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"
dirname $0 yields the directory of the script file currently being executed. Note the [ "x$KAFKA_HEAP_OPTS" = "x" ] guard: the script only sets the 512 MB heap when the caller has not already exported one (a demo follows).
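Because of that guard, the heap size can be overridden from the environment. A minimal sketch, assuming the parcel's bin directory is on PATH (the broker and topic are the ones from this article; the 1G value is arbitrary):

# Uses the default -Xmx512M set inside the script:
kafka-console-producer --broker-list hadoop102:9092 --topic topic_start

# The exported value wins, because the script only sets KAFKA_HEAP_OPTS when it is empty:
KAFKA_HEAP_OPTS="-Xmx1G" kafka-console-producer --broker-list hadoop102:9092 --topic topic_start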
That leads to kafka-run-class.sh, in the same directory as the current script:
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
  echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
  exit 1
fi

# CYGWIN == 1 if Cygwin is detected, else 0.
if [[ $(uname -a) =~ "CYGWIN" ]]; then
  CYGWIN=1
else
  CYGWIN=0
fi

if [ -z "$INCLUDE_TEST_JARS" ]; then
  INCLUDE_TEST_JARS=false
fi

# Exclude jars not necessary for running commands.
regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$"
should_include_file() {
  if [ "$INCLUDE_TEST_JARS" = true ]; then
    return 0
  fi
  file=$1
  if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
    return 0
  else
    return 1
  fi
}

base_dir=$(dirname $0)/..

if [ -z "$SCALA_VERSION" ]; then
  SCALA_VERSION=2.11.12
fi

if [ -z "$SCALA_BINARY_VERSION" ]; then
  SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
fi

# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
shopt -s nullglob
for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
do
  CLASSPATH="$CLASSPATH:$dir/*"
done

for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  clients_lib_dir=$(dirname $0)/../clients/build/libs
  streams_lib_dir=$(dirname $0)/../streams/build/libs
  rocksdb_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
else
  clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
  streams_lib_dir=$clients_lib_dir
  rocksdb_lib_dir=$streams_lib_dir
fi

for file in "$clients_lib_dir"/kafka-clients*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$streams_lib_dir"/kafka-streams*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
else
  VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'`
  SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS::((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number
  for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$file":"$CLASSPATH"
    fi
  done
fi

for file in "$rocksdb_lib_dir"/rocksdb*.jar;
do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
do
  CLASSPATH="$CLASSPATH:$dir/*"
done

for cc_pkg in "api" "transforms" "runtime" "file" "json" "tools" "basic-auth-extension"
do
  for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
  if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
    CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
  fi
done

# classpath addition for release
for file in "$base_dir"/libs/*;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

# Set SENTRY_HOME if possible and add Sentry jars to classpath
if [[ -z "$SENTRY_HOME" ]]; then
  if [[ -d ${base_dir}/../sentry ]]; then
    export SENTRY_HOME=`readlink -m ${base_dir}/../sentry`
  fi
fi
if [[ -n "$SENTRY_HOME" ]]; then
  for f in ${SENTRY_HOME}/lib/*.jar ${SENTRY_HOME}/lib/plugins/*.jar; do
    export CLASSPATH=${CLASSPATH}:${f}
  done
fi

# Include the sentry configuration on the classpath
if [ -z "$SENTRY_CONF_DIR" ]; then
  SENTRY_CONF_DIR="/etc/kafka/conf/sentry-conf"
fi
export CLASSPATH=${CLASSPATH}:${SENTRY_CONF_DIR}

shopt -u nullglob

if [ -z "$CLASSPATH" ] ; then
  echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
  exit 1
fi

# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi

# JMX port to use
if [ $JMX_PORT ]; then
  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
fi

# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
  LOG_DIR="$base_dir/logs"
fi

# Log4j settings
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
  # Log to console. This is a tool.
  LOG4J_DIR="$base_dir/config/tools-log4j.properties"
  # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
  (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
  KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
else
  # create logs directory
  if [ ! -d "$LOG_DIR" ]; then
    mkdir -p "$LOG_DIR"
  fi
fi

# If Cygwin is detected, LOG_DIR is converted to Windows format.
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"

# Generic jvm settings you want to add
if [ -z "$KAFKA_OPTS" ]; then
  KAFKA_OPTS=""
fi

# Set Debug options if enabled
if [ "x$KAFKA_DEBUG" != "x" ]; then

  # Use default ports
  DEFAULT_JAVA_DEBUG_PORT="5005"

  if [ -z "$JAVA_DEBUG_PORT" ]; then
    JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
  fi

  # Use the defaults if JAVA_DEBUG_OPTS was not set
  DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
  if [ -z "$JAVA_DEBUG_OPTS" ]; then
    JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
  fi

  echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
  KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
fi

# Which java to use
if [ -z "$JAVA_HOME" ]; then
  JAVA="java"
else
  JAVA="$JAVA_HOME/bin/java"
fi

# Memory options
if [ -z "$KAFKA_HEAP_OPTS" ]; then
  KAFKA_HEAP_OPTS="-Xmx256M"
fi

# JVM performance options
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true"
fi

# version option
for args in "$@" ; do
  if [ "$args" = "--version" ]; then
    exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "kafka.utils.VersionInfo"
  fi
done

while [ $# -gt 0 ]; do
  COMMAND=$1
  case $COMMAND in
    -name)
      DAEMON_NAME=$2
      CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
      shift 2
      ;;
    -loggc)
      if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
        GC_LOG_ENABLED="true"
      fi
      shift
      ;;
    -daemon)
      DAEMON_MODE="true"
      shift
      ;;
    *)
      break
      ;;
  esac
done

# GC options
GC_FILE_SUFFIX='-gc.log'
GC_LOG_FILE_NAME=''
if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
  GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX

  # The first segment of the version number, which is '1' for releases before Java 9
  # it then becomes '9', '10', ...
  # Some examples of the first line of `java --version`:
  # 8 -> java version "1.8.0_152"
  # 9.0.4 -> java version "9.0.4"
  # 10 -> java version "10" 2018-03-20
  # 10.0.1 -> java version "10.0.1" 2018-04-17
  # We need to match to the end of the line to prevent sed from printing the characters that do not match
  JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
  if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
    KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=102400"
  else
    KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
  fi
fi

# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank)
# Syntax used on the right side is native Bash string manipulation; for more details see
# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal"
CLASSPATH=${CLASSPATH#:}

# If Cygwin is detected, classpath is converted to Windows format.
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")

# Launch mode
if [ "x$DAEMON_MODE" = "xtrue" ]; then
  nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
else
  exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
fi
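Two small pieces of this script are worth trying in isolation: the ${CLASSPATH#:} substring removal that strips a possible leading colon, and the sed expression that extracts the Java major version. A sketch of both (the printed version depends on your local JDK):

CLASSPATH=":a.jar:b.jar"
echo "${CLASSPATH#:}"   # a.jar:b.jar - the '#' form removes the shortest matching prefix
java -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p'
# prints e.g. 1 on Java 8 (from 'java version "1.8.0_152"') or 11 on Java 11,
# which is why the script compares the result against 9 to pick the GC logging flags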
