Some tunable Storm parameters

Below is Storm's defaults.yaml, the full set of defaults the daemons ship with. As its own header comments say, anything you need to change goes into storm.yaml (or, for the topology.* keys, into a topology's own configuration), which overrides these values.
```yaml
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

########### These all have default values as shown
########### Additional configuration goes into storm.yaml

java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/lib64"

### storm.* configs are general configurations
# the local dir is where jars are kept
storm.local.dir: "storm-local"
storm.log4j2.conf.dir: "log4j2"
storm.zookeeper.servers:
    - "localhost"
storm.zookeeper.port: 2181
storm.zookeeper.root: "/storm"
storm.zookeeper.session.timeout: 20000
storm.zookeeper.connection.timeout: 15000
storm.zookeeper.retry.times: 5
storm.zookeeper.retry.interval: 1000
storm.zookeeper.retry.intervalceiling.millis: 30000
storm.zookeeper.auth.user: null
storm.zookeeper.auth.password: null
storm.exhibitor.port: 8080
storm.exhibitor.poll.uripath: "/exhibitor/v1/cluster/list"
storm.cluster.mode: "distributed" # can be distributed or local
storm.local.mode.zmq: false
storm.thrift.transport: "org.apache.storm.security.auth.SimpleTransportPlugin"
storm.thrift.socket.timeout.ms: 600000
storm.principal.tolocal: "org.apache.storm.security.auth.DefaultPrincipalToLocal"
storm.group.mapping.service: "org.apache.storm.security.auth.ShellBasedGroupsMapping"
storm.group.mapping.service.params: null
storm.messaging.transport: "org.apache.storm.messaging.netty.Context"
storm.nimbus.retry.times: 5
storm.nimbus.retry.interval.millis: 2000
storm.nimbus.retry.intervalceiling.millis: 60000
storm.auth.simple-white-list.users: []
storm.cluster.state.store: "org.apache.storm.cluster.ZKStateStorageFactory"
storm.meta.serialization.delegate: "org.apache.storm.serialization.GzipThriftSerializationDelegate"
storm.codedistributor.class: "org.apache.storm.codedistributor.LocalFileSystemCodeDistributor"
storm.workers.artifacts.dir: "workers-artifacts"
storm.health.check.dir: "healthchecks"
storm.health.check.timeout.ms: 5000
storm.disable.symlinks: false
```
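Since defaults.yaml itself should not be edited, site-specific values go into storm.yaml. A minimal hypothetical override, with placeholder hostnames and paths (these are examples only, not values from the defaults above):

```yaml
# storm.yaml -- illustrative override; hostnames and paths are examples only
storm.zookeeper.servers:
    - "zk1.example.com"
    - "zk2.example.com"
nimbus.seeds: ["nimbus1.example.com"]
storm.local.dir: "/var/storm"
```

Every other key in this file can be overridden the same way.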
```yaml
### nimbus.* configs are for the master
nimbus.seeds: ["localhost"]
nimbus.thrift.port: 6627
nimbus.thrift.threads: 64
nimbus.thrift.max_buffer_size: 1048576
nimbus.childopts: "-Xmx1024m"
nimbus.task.timeout.secs: 30
nimbus.supervisor.timeout.secs: 60
nimbus.monitor.freq.secs: 10
nimbus.cleanup.inbox.freq.secs: 600
nimbus.inbox.jar.expiration.secs: 3600
nimbus.code.sync.freq.secs: 120
nimbus.task.launch.secs: 120
nimbus.file.copy.expiration.secs: 600
nimbus.topology.validator: "org.apache.storm.nimbus.DefaultTopologyValidator"
topology.min.replication.count: 1
topology.max.replication.wait.time.sec: 60
nimbus.credential.renewers.freq.secs: 600
nimbus.queue.size: 100000
scheduler.display.resource: false

### ui.* configs are for the master
ui.host: 0.0.0.0
ui.port: 8080
ui.childopts: "-Xmx768m"
ui.actions.enabled: true
ui.filter: null
ui.filter.params: null
ui.users: null
ui.header.buffer.bytes: 4096
ui.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin

logviewer.port: 8000
logviewer.childopts: "-Xmx128m"
logviewer.cleanup.age.mins: 10080
logviewer.appender.name: "A1"
logviewer.max.sum.worker.logs.size.mb: 4096
logviewer.max.per.worker.logs.size.mb: 2048
logs.users: null

drpc.port: 3772
drpc.worker.threads: 64
drpc.max_buffer_size: 1048576
drpc.queue.size: 128
drpc.invocations.port: 3773
drpc.invocations.threads: 64
drpc.request.timeout.secs: 600
drpc.childopts: "-Xmx768m"
drpc.http.port: 3774
drpc.https.port: -1
drpc.https.keystore.password: ""
drpc.https.keystore.type: "JKS"
drpc.http.creds.plugin: org.apache.storm.security.auth.DefaultHttpCredentialsPlugin
drpc.authorizer.acl.filename: "drpc-auth-acl.yaml"
drpc.authorizer.acl.strict: false

transactional.zookeeper.root: "/transactional"
transactional.zookeeper.servers: null
transactional.zookeeper.port: null
```
```yaml
## blobstore configs
supervisor.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
supervisor.blobstore.download.thread.count: 5
supervisor.blobstore.download.max_retries: 3
supervisor.localizer.cache.target.size.mb: 10240
supervisor.localizer.cleanup.interval.ms: 30000
nimbus.blobstore.class: "org.apache.storm.blobstore.LocalFsBlobStore"
nimbus.blobstore.expiration.secs: 600
storm.blobstore.inputstream.buffer.size.bytes: 65536
client.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
storm.blobstore.replication.factor: 3
# For secure mode we would want to change this config to true
storm.blobstore.acl.validation.enabled: false

### supervisor.* configs are for node supervisors
# Define the number of workers that can run on this machine. Each worker is assigned a port to use for communication
supervisor.slots.ports:
    - 6700
    - 6701
    - 6702
    - 6703
supervisor.childopts: "-Xmx256m"
supervisor.run.worker.as.user: false
# how long the supervisor will wait to ensure that a worker process has started
supervisor.worker.start.timeout.secs: 120
# how long between heartbeats until the supervisor considers a worker dead and tries to restart it
supervisor.worker.timeout.secs: 30
# how many seconds to sleep before shutting down threads on the worker
supervisor.worker.shutdown.sleep.secs: 3
# how frequently the supervisor checks on the status of the processes it's monitoring and restarts them if necessary
supervisor.monitor.frequency.secs: 3
# how frequently the supervisor heartbeats to the cluster state (for nimbus)
supervisor.heartbeat.frequency.secs: 5
supervisor.enable: true
supervisor.supervisors: []
supervisor.supervisors.commands: []
supervisor.memory.capacity.mb: 4096.0
# By convention 1 cpu core should be about 100, but this can be adjusted if needed;
# using 100 makes it simple to set the desired value to the capacity measurement
# for single-threaded bolts
supervisor.cpu.capacity: 400.0
```
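A machine with more cores and memory can advertise more slots and capacity. A hypothetical override for a 16-core, 24 GB node (the sizes are examples, not recommendations):

```yaml
# illustrative supervisor override -- all values are example sizes
supervisor.slots.ports:
    - 6700
    - 6701
    - 6702
    - 6703
    - 6704
    - 6705
supervisor.memory.capacity.mb: 24576.0
supervisor.cpu.capacity: 1600.0   # 16 cores x 100, per the convention above
```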
```yaml
### worker.* configs are for task workers
worker.heap.memory.mb: 768
worker.childopts: "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"
worker.gc.childopts: ""

# Unlocking commercial features requires a special license from Oracle.
# See http://www.oracle.com/technetwork/java/javase/terms/products/index.html
# For this reason, profiler features are disabled by default.
worker.profiler.enabled: false
worker.profiler.childopts: "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder"
worker.profiler.command: "flight.bash"
worker.heartbeat.frequency.secs: 1

# check whether dynamic log levels can be reset from DEBUG to INFO in workers
worker.log.level.reset.poll.secs: 30

# control how many worker receiver threads we need per worker
topology.worker.receiver.thread.count: 1
```
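The %HEAP-MEM% placeholder in worker.childopts is substituted by Storm with the heap size assigned to that worker (worker.heap.memory.mb, unless the topology overrides it). A hypothetical override giving workers a 2 GB heap while keeping only the heap-dump flags (the values are illustrative):

```yaml
# illustrative override -- heap size is an example value
worker.heap.memory.mb: 2048
worker.childopts: "-Xmx%HEAP-MEM%m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump"
```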
```yaml
task.heartbeat.frequency.secs: 3
task.refresh.poll.secs: 10
task.credentials.poll.secs: 30
task.backpressure.poll.secs: 30

# disabled by default
topology.backpressure.enable: false
backpressure.disruptor.high.watermark: 0.9
backpressure.disruptor.low.watermark: 0.4
backpressure.znode.timeout.secs: 30
backpressure.znode.update.freq.secs: 15

zmq.threads: 1
zmq.linger.millis: 5000
zmq.hwm: 0

storm.messaging.netty.server_worker_threads: 1
storm.messaging.netty.client_worker_threads: 1
storm.messaging.netty.buffer_size: 5242880 # 5MB buffer
# Since nimbus.task.launch.secs and supervisor.worker.start.timeout.secs are 120, other workers should also wait at least that long before giving up on connecting to a peer worker. The reconnection period also needs to be longer than storm.zookeeper.session.timeout (default 20s), so that reconnection can be aborted once the target worker is known to be dead.
storm.messaging.netty.max_retries: 300
storm.messaging.netty.max_wait_ms: 1000
storm.messaging.netty.min_wait_ms: 100
# If the Netty messaging layer is busy (its internal buffer is not writable), the Netty client batches messages as much as possible, up to storm.messaging.netty.transfer.batch.size bytes; otherwise it flushes each message as soon as possible to reduce latency.
storm.messaging.netty.transfer.batch.size: 262144
# Sets the backlog value to specify when the channel binds to a local address
storm.messaging.netty.socket.backlog: 500
# By default, Netty SASL authentication is disabled. Users can override and set it to true for a specific topology.
storm.messaging.netty.authentication: false

# Default plugin to use for automatic network topology discovery
storm.network.topography.plugin: org.apache.storm.networktopography.DefaultRackDNSToSwitchMapping
# default number of seconds the group mapping service will cache user groups
storm.group.mapping.service.cache.duration.secs: 120
```
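To sanity-check the retry settings against the comment above: with a wait of between 100 ms and 1000 ms per attempt and 300 retries, a client keeps trying for up to roughly 300 s, which indeed exceeds both the 120 s launch/start timeouts and the 20 s ZooKeeper session timeout. A hypothetical, more patient override (values illustrative):

```yaml
# illustrative override -- keep retrying for up to ~10 minutes
storm.messaging.netty.max_retries: 600
storm.messaging.netty.max_wait_ms: 1000
storm.messaging.netty.min_wait_ms: 100
```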
```yaml
### topology.* configs are for specific executing storms
topology.enable.message.timeouts: true
topology.debug: false
topology.workers: 1
topology.acker.executors: null
topology.eventlogger.executors: 0
topology.tasks: null
# maximum amount of time a message has to complete before it's considered failed
topology.message.timeout.secs: 30
topology.multilang.serializer: "org.apache.storm.multilang.JsonSerializer"
topology.shellbolt.max.pending: 100
topology.skip.missing.kryo.registrations: false
topology.max.task.parallelism: null
topology.max.spout.pending: null
topology.state.synchronization.timeout.secs: 60
topology.stats.sample.rate: 0.05
topology.builtin.metrics.bucket.size.secs: 60
topology.fall.back.on.java.serialization: true
topology.worker.childopts: null
topology.worker.logwriter.childopts: "-Xmx64m"
topology.executor.receive.buffer.size: 1024 # batched
topology.executor.send.buffer.size: 1024 # individual messages
topology.transfer.buffer.size: 1024 # batched
topology.tick.tuple.freq.secs: null
topology.worker.shared.thread.pool.size: 4
topology.spout.wait.strategy: "org.apache.storm.spout.SleepSpoutWaitStrategy"
topology.sleep.spout.wait.strategy.time.ms: 1
topology.error.throttle.interval.secs: 10
topology.max.error.report.per.interval: 5
topology.kryo.factory: "org.apache.storm.serialization.DefaultKryoFactory"
topology.tuple.serializer: "org.apache.storm.serialization.types.ListDelegateSerializer"
topology.trident.batch.emit.interval.millis: 500
topology.testing.always.try.serialize: false
topology.classpath: null
topology.environment: null
topology.bolts.outgoing.overflow.buffer.enable: false
topology.disruptor.wait.timeout.millis: 1000
topology.disruptor.batch.size: 100
topology.disruptor.batch.timeout.millis: 1
topology.disable.loadaware.messaging: false
topology.state.checkpoint.interval.ms: 1000
topology.localityaware.higher.bound.percent: 0.8
topology.localityaware.lower.bound.percent: 0.2
topology.serialized.message.size.metrics: false
```
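Unlike the daemon settings, topology.* keys are typically overridden per topology at submit time rather than cluster-wide. The knobs most often tuned together for throughput are the worker count, acker count, spout pending limit, and message timeout; a hypothetical combination (values illustrative, not recommendations):

```yaml
# illustrative per-topology tuning
topology.workers: 4
topology.acker.executors: 4
topology.max.spout.pending: 1000
topology.message.timeout.secs: 60
```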
```yaml
# Configs for the Resource Aware Scheduler
# topology.priority describes the importance of a topology: 0 is the highest priority, and importance decreases as the number increases.
# Recommended range is 0-29, but no hard limit is enforced.
topology.priority: 29
topology.component.resources.onheap.memory.mb: 128.0
topology.component.resources.offheap.memory.mb: 0.0
topology.component.cpu.pcore.percent: 10.0
topology.worker.max.heap.size.mb: 768.0
topology.scheduler.strategy: "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy"
resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"

blacklist.scheduler.tolerance.time.secs: 300
blacklist.scheduler.tolerance.count: 3
blacklist.scheduler.resume.time.secs: 1800
blacklist.scheduler.reporter: "org.apache.storm.scheduler.blacklist.reporters.LogReporter"
blacklist.scheduler.strategy: "org.apache.storm.scheduler.blacklist.strategies.DefaultBlacklistStrategy"
```
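Under the Resource Aware Scheduler, every component requests these per-component defaults unless the topology says otherwise. A memory-heavy topology might override them like this (values illustrative):

```yaml
# illustrative RAS override for a memory-heavy topology
topology.component.resources.onheap.memory.mb: 512.0
topology.component.cpu.pcore.percent: 50.0
topology.worker.max.heap.size.mb: 2048.0
topology.priority: 10   # lower number = higher priority
```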
```yaml
dev.zookeeper.path: "/tmp/dev-storm-zookeeper"

pacemaker.servers: []
pacemaker.port: 6699
pacemaker.base.threads: 10
pacemaker.max.threads: 50
pacemaker.thread.timeout: 10
pacemaker.childopts: "-Xmx1024m"
pacemaker.auth.method: "NONE"
pacemaker.kerberos.users: []
pacemaker.thrift.message.size.max: 10485760

# default storm daemon metrics reporter plugins
storm.daemon.metrics.reporter.plugins:
    - "org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter"

# configuration of cluster metrics consumer
storm.cluster.metrics.consumer.publish.interval.secs: 60

storm.resource.isolation.plugin: "org.apache.storm.container.cgroup.CgroupManager"
# Also determines whether the unit tests for cgroups run.
# If storm.resource.isolation.plugin.enable is set to false the unit tests for cgroups will not run.
storm.resource.isolation.plugin.enable: false
storm.cgroup.memory.enforcement.enable: false

# Configs for CGroup support
storm.cgroup.hierarchy.dir: "/cgroup/storm_resources"
storm.cgroup.resources:
    - "cpu"
    - "memory"
storm.cgroup.hierarchy.name: "storm"
storm.supervisor.cgroup.rootdir: "storm"
storm.cgroup.cgexec.cmd: "/bin/cgexec"
storm.cgroup.memory.limit.tolerance.margin.mb: 0.0
storm.supervisor.memory.limit.tolerance.margin.mb: 128.0
storm.supervisor.hard.memory.limit.multiplier: 2.0
storm.supervisor.hard.memory.limit.overage.mb: 2024
storm.supervisor.low.memory.threshold.mb: 1024
storm.supervisor.medium.memory.threshold.mb: 1536
storm.supervisor.medium.memory.grace.period.ms: 30000
storm.topology.classpath.beginning.enabled: false

worker.metrics:
  "CGroupMemory": "org.apache.storm.metric.cgroup.CGroupMemoryUsage"
  "CGroupMemoryLimit": "org.apache.storm.metric.cgroup.CGroupMemoryLimit"
  "CGroupCpu": "org.apache.storm.metric.cgroup.CGroupCpu"
  "CGroupCpuGuarantee": "org.apache.storm.metric.cgroup.CGroupCpuGuarantee"

num.stat.buckets: 20
```