ConsumerRecords<String, String> records = consumer.poll(100); 
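
For context, this call usually sits inside a loop like the following (a minimal sketch; the broker address, group id, and topic name are assumptions):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
props.put("group.id", "demo-group");              // assumption: group name
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Collections.singletonList("demo-topic")); // assumption: topic name
try {
    while (true) {
        // wait at most 100 ms for data; an empty result just means nothing arrived in time
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset=%d key=%s value=%s%n", record.offset(), record.key(), record.value());
    }
} finally {
    consumer.close();
}

So what does poll(100) actually do internally? The Javadoc and implementation below answer that.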

 

/**
* Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. It is an error to not have
* subscribed to any topics or partitions before polling for data.
* <p>
* On each poll, the consumer will try to use the last consumed offset as the starting offset and fetch sequentially. The last
* consumed offset can be manually set through {@link #seek(TopicPartition, long)} or automatically set as the last committed
* offset for the subscribed list of partitions.
*
* @param timeout The time, in milliseconds, spent waiting in poll if data is not available in the buffer.
* If 0, returns immediately with any records that are available currently in the buffer, else returns empty.
* Must not be negative.
* @return map of topic to records since the last fetch for the subscribed list of topics and partitions
*
* @throws org.apache.kafka.clients.consumer.InvalidOffsetException if the offset for a partition or set of
* partitions is undefined or out of range and no offset reset policy has been configured
* @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
* function is called
* @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
* this function is called
* @throws org.apache.kafka.common.errors.AuthorizationException if caller lacks Read access to any of the subscribed
* topics or to the configured groupId
* @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors (e.g. invalid groupId or
* session timeout, errors deserializing key/value pairs, or any new error cases in future versions)
* @throws java.lang.IllegalArgumentException if the timeout value is negative
* @throws java.lang.IllegalStateException if the consumer is not subscribed to any topics or manually assigned any
* partitions to consume from
*/
@Override
public ConsumerRecords<K, V> poll(long timeout) {
    try {
        if (timeout < 0)
            throw new IllegalArgumentException("Timeout must not be negative");

        if (this.subscriptions.hasNoSubscriptionOrUserAssignment())
            throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions");

        // poll for new data until the timeout expires
        long start = time.milliseconds();
        long remaining = timeout;
        do {
            Map<TopicPartition, List<ConsumerRecord<K, V>>> records = pollOnce(remaining); // see pollOnce below
            if (!records.isEmpty()) {
                // before returning the fetched records, we can send off the next round of fetches
                // and avoid block waiting for their responses to enable pipelining while the user
                // is handling the fetched records.
                //
                // NOTE: since the consumed position has already been updated, we must not allow
                // wakeups or any other errors to be triggered prior to returning the fetched records.
                if (fetcher.sendFetches() > 0) { // pre-send the next fetch to save time (pipelining)
                    client.pollNoWakeup();
                }

                if (this.interceptors == null)
                    return new ConsumerRecords<>(records);
                else
                    return this.interceptors.onConsume(new ConsumerRecords<>(records)); // run the interceptors first, if any
            }

            long elapsed = time.milliseconds() - start;
            remaining = timeout - elapsed; // keep retrying the poll until the timeout expires
        } while (remaining > 0);

        return ConsumerRecords.empty(); // no data was ready within the timeout, return empty
    } finally {
        release();
    }
}
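
One practical consequence of the do/while above: because the body runs once even when the timeout is 0, poll(0) performs a single pollOnce pass and returns immediately. That is why, with this client version, poll(0) is commonly used just to trigger group joining and partition assignment before seeking (a usage sketch, not an official contract):

consumer.subscribe(Collections.singletonList("demo-topic")); // assumption: topic name
consumer.poll(0); // joins the group and fetches an assignment; may return no records
for (TopicPartition tp : consumer.assignment())
    consumer.seek(tp, 0L); // e.g. rewind every assigned partition to offset 0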

 

pollOnce

/**
 * Do one round of polling. In addition to checking for new data, this does any needed offset commits
 * (if auto-commit is enabled), and offset resets (if an offset reset policy is defined).
 * @param timeout The maximum time to block in the underlying call to {@link ConsumerNetworkClient#poll(long)}.
 * @return The fetched records (may be empty)
 */
private Map<TopicPartition, List<ConsumerRecord<K, V>>> pollOnce(long timeout) {
    coordinator.poll(time.milliseconds()); // heartbeat/housekeeping with the ConsumerCoordinator

    // fetch positions if we have partitions we're subscribed to that we
    // don't know the offset for
    if (!subscriptions.hasAllFetchPositions())
        updateFetchPositions(this.subscriptions.missingFetchPositions()); // sync offsets

    // if data is available already, return it immediately
    Map<TopicPartition, List<ConsumerRecord<K, V>>> records = fetcher.fetchedRecords(); // already-fetched data
    if (!records.isEmpty())
        return records; // return it directly

    // send any new fetches (won't resend pending fetches)
    fetcher.sendFetches(); // nothing on hand, so issue fetch requests

    long now = time.milliseconds();
    long pollTimeout = Math.min(coordinator.timeToNextPoll(now), timeout);

    client.poll(pollTimeout, now, new PollCondition() {
        @Override
        public boolean shouldBlock() {
            // since a fetch might be completed by the background thread, we need this poll condition
            // to ensure that we do not block unnecessarily in poll()
            return !fetcher.hasCompletedFetches();
        }
    });

    // after the long poll, we should check whether the group needs to rebalance
    // prior to returning data so that the group can stabilize faster
    if (coordinator.needRejoin())
        return Collections.emptyMap();

    return fetcher.fetchedRecords();
}

 

Now let's look at the Fetcher.

public Fetcher(ConsumerNetworkClient client,
               int minBytes,
               int maxBytes,
               int maxWaitMs,
               int fetchSize,
               int maxPollRecords,
               boolean checkCrcs,
               Deserializer<K> keyDeserializer,
               Deserializer<V> valueDeserializer,
               Metadata metadata,
               SubscriptionState subscriptions,
               Metrics metrics,
               String metricGrpPrefix,
               Time time,
               long retryBackoffMs) {

When the consumer is constructed, the fetcher is created as:

this.fetcher = new Fetcher<>(this.client,
        config.getInt(ConsumerConfig.FETCH_MIN_BYTES_CONFIG),
        config.getInt(ConsumerConfig.FETCH_MAX_BYTES_CONFIG),
        config.getInt(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG),
        config.getInt(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG),
        config.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG),
        config.getBoolean(ConsumerConfig.CHECK_CRCS_CONFIG),
        this.keyDeserializer,
        this.valueDeserializer,
        this.metadata,
        this.subscriptions,
        metrics,
        metricGrpPrefix,
        this.time,
        this.retryBackoffMs);

From this you can see which consumer config feeds each constructor parameter.
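
As a hedged illustration, here is how those configs would be set explicitly (the values shown are the defaults I believe apply to this client version; treat the exact numbers as assumptions):

Properties props = new Properties();
props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);                     // minBytes
props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 50 * 1024 * 1024);      // maxBytes
props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);                 // maxWaitMs
props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 1024 * 1024); // fetchSize (per partition)
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);                  // maxPollRecords
props.put(ConsumerConfig.CHECK_CRCS_CONFIG, true);                       // checkCrcs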

 

fetcher.fetchedRecords

/**
* Return the fetched records, empty the record buffer and update the consumed position.
*
* NOTE: returning empty records guarantees that the consumed positions are NOT updated.
*
* @return The fetched records per partition
* @throws OffsetOutOfRangeException If there is OffsetOutOfRange error in fetchResponse and
* the defaultResetPolicy is NONE
*/
public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() {
    Map<TopicPartition, List<ConsumerRecord<K, V>>> drained = new HashMap<>();
    int recordsRemaining = maxPollRecords; // cap on the number of records returned per poll

    while (recordsRemaining > 0) {
        if (nextInLineRecords == null || nextInLineRecords.isDrained()) { // nextInLineRecords is empty, no records left in it
            CompletedFetch completedFetch = completedFetches.poll(); // take one fetch off the completedFetches queue
            if (completedFetch == null)
                break;

            nextInLineRecords = parseFetchedData(completedFetch); // parse the fetch into nextInLineRecords
        } else {
            TopicPartition partition = nextInLineRecords.partition;
            List<ConsumerRecord<K, V>> records = drainRecords(nextInLineRecords, recordsRemaining); // drain up to recordsRemaining records from nextInLineRecords
            if (!records.isEmpty()) {
                List<ConsumerRecord<K, V>> currentRecords = drained.get(partition); // the record list already accumulated for this partition
                if (currentRecords == null) {
                    drained.put(partition, records); // first batch for this partition
                } else {
                    // this case shouldn't usually happen because we only send one fetch at a time per partition,
                    // but it might conceivably happen in some rare cases (such as partition leader changes).
                    // we have to copy to a new list because the old one may be immutable
                    List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size());
                    newRecords.addAll(currentRecords);
                    newRecords.addAll(records);
                    drained.put(partition, newRecords);
                }
                recordsRemaining -= records.size();
            }
        }
    }
    return drained;
}

As you can see, fetchedRecords only drains data from fetches that have already completed; it performs no network I/O of its own.
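
Because the result is keyed by partition, the public API exposes it per partition as well. A common consumption pattern, adapted from the KafkaConsumer Javadoc (the process() handler is hypothetical):

ConsumerRecords<String, String> records = consumer.poll(100);
for (TopicPartition partition : records.partitions()) {
    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
    for (ConsumerRecord<String, String> record : partitionRecords)
        process(record); // hypothetical per-record handler
    // commit the offset after the last record we handled for this partition
    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
}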

 

fetcher.sendFetches

Start with

createFetchRequests
/**
 * Create fetch requests for all nodes for which we have assigned partitions
 * that have no existing requests in flight.
 */
private Map<Node, FetchRequest> createFetchRequests() {
    // create the fetch info
    Cluster cluster = metadata.fetch();
    Map<Node, LinkedHashMap<TopicPartition, FetchRequest.PartitionData>> fetchable = new LinkedHashMap<>();
    for (TopicPartition partition : fetchablePartitions()) {
        Node node = cluster.leaderFor(partition); // find the node hosting the partition leader
        if (node == null) {
            metadata.requestUpdate();
        } else if (this.client.pendingRequestCount(node) == 0) { // only one outstanding fetch request per node at a time
            // if there is a leader and no in-flight requests, issue a new fetch
            LinkedHashMap<TopicPartition, FetchRequest.PartitionData> fetch = fetchable.get(node);
            if (fetch == null) {
                fetch = new LinkedHashMap<>();
                fetchable.put(node, fetch);
            }

            long position = this.subscriptions.position(partition);
            fetch.put(partition, new FetchRequest.PartitionData(position, this.fetchSize)); // position = where to start reading, fetchSize = how much to read
            log.trace("Added fetch request for partition {} at offset {}", partition, position);
        } else {
            log.trace("Skipping fetch for partition {} because there is an in-flight request to {}", partition, node);
        }
    }

    // create the fetches
    Map<Node, FetchRequest> requests = new HashMap<>();
    for (Map.Entry<Node, LinkedHashMap<TopicPartition, FetchRequest.PartitionData>> entry : fetchable.entrySet()) {
        Node node = entry.getKey();
        FetchRequest fetch = new FetchRequest(this.maxWaitMs, this.minBytes, this.maxBytes, entry.getValue()); // wrap into a FetchRequest
        requests.put(node, fetch);
    }
    return requests;
}

 

/**
 * Set up a fetch request for any node for which we have assigned partitions and which doesn't already
 * have an in-flight fetch or pending fetch data.
 * @return number of fetches sent
 */
public int sendFetches() {
    Map<Node, FetchRequest> fetchRequestMap = createFetchRequests(); // build the fetch requests
    for (Map.Entry<Node, FetchRequest> fetchEntry : fetchRequestMap.entrySet()) {
        final FetchRequest request = fetchEntry.getValue();
        final Node fetchTarget = fetchEntry.getKey();

        client.send(fetchTarget, ApiKeys.FETCH, request) // send the request
                .addListener(new RequestFutureListener<ClientResponse>() {
                    @Override
                    public void onSuccess(ClientResponse resp) { // on success
                        FetchResponse response = (FetchResponse) resp.responseBody();
                        Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet());

                        for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) {
                            TopicPartition partition = entry.getKey();
                            long fetchOffset = request.fetchData().get(partition).offset;
                            FetchResponse.PartitionData fetchData = entry.getValue();
                            completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator)); // wrap the data as a CompletedFetch and append it to completedFetches
                        }

                        sensors.fetchLatency.record(resp.requestLatencyMs());
                        sensors.fetchThrottleTimeSensor.record(response.getThrottleTime());
                    }

                    @Override
                    public void onFailure(RuntimeException e) {
                        log.debug("Fetch request to {} failed", fetchTarget, e);
                    }
                });
    }
    return fetchRequestMap.size();
}

 

client.send

ConsumerNetworkClient
/**
 * Send a new request. Note that the request is not actually transmitted on the
 * network until one of the {@link #poll(long)} variants is invoked. At this
 * point the request will either be transmitted successfully or will fail.
 * Use the returned future to obtain the result of the send. Note that there is no
 * need to check for disconnects explicitly on the {@link ClientResponse} object;
 * instead, the future will be failed with a {@link DisconnectException}.
 * @param node The destination of the request
 * @param api The Kafka API call
 * @param request The request payload
 * @return A future which indicates the result of the send.
 */
public RequestFuture<ClientResponse> send(Node node,
                                          ApiKeys api,
                                          AbstractRequest request) {
    return send(node, api, ProtoUtils.latestVersion(api.id), request);
}

private RequestFuture<ClientResponse> send(Node node,
                                           ApiKeys api,
                                           short version,
                                           AbstractRequest request) {
    long now = time.milliseconds();
    RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler();
    RequestHeader header = client.nextRequestHeader(api, version);
    ClientRequest clientRequest = new ClientRequest(node.idString(), now, true, header, request, completionHandler); // wrap into a ClientRequest
    put(node, clientRequest); // not actually sent yet; just queued in the unsent list

    // wakeup the client in case it is blocking in poll so that we can send the queued request
    client.wakeup();
    return completionHandler.future;
}

private void put(Node node, ClientRequest request) {
    synchronized (this) {
        List<ClientRequest> nodeUnsent = unsent.get(node);
        if (nodeUnsent == null) {
            nodeUnsent = new ArrayList<>();
            unsent.put(node, nodeUnsent);
        }
        nodeUnsent.add(request);
    }
}

 

NetworkClient.wakeup

/**
* Interrupt the client if it is blocked waiting on I/O.
*/
@Override
public void wakeup() {
this.selector.wakeup();
}

wakeup simply wakes the client out of its blocking wait on the selector so it can go handle other work.
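
The public KafkaConsumer.wakeup() rides on this mechanism. The standard pattern, from the consumer Javadoc, is to call it from another thread to break a long-blocking poll, e.g. for shutdown:

final AtomicBoolean closed = new AtomicBoolean(false);

// consumer thread
try {
    while (!closed.get()) {
        ConsumerRecords<String, String> records = consumer.poll(10000);
        // handle records ...
    }
} catch (WakeupException e) {
    if (!closed.get()) throw e; // ignore the exception only if we are shutting down
} finally {
    consumer.close();
}

// shutdown thread
closed.set(true);
consumer.wakeup(); // forces poll() to return with a WakeupException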

 

As the Javadoc above says, a request is only actually transmitted when poll is invoked. So where does poll get called?

 

In pollOnce above, we saw this logic:

client.poll(pollTimeout, now, new PollCondition() {
    @Override
    public boolean shouldBlock() {
        // since a fetch might be completed by the background thread, we need this poll condition
        // to ensure that we do not block unnecessarily in poll()
        return !fetcher.hasCompletedFetches();
    }
});

This means the poll call blocks for at most pollTimeout. PollCondition.shouldBlock determines whether blocking is needed at all: when hasCompletedFetches() is true there is already data waiting, so there is no need to wait; only when no data is on hand do we actually block.
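
Outside of Kafka's internals, the same "only block when nothing is ready" idea looks like this (an illustrative standalone sketch; none of these class or method names are Kafka APIs):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

class PollConditionSketch {
    private final Queue<String> completed = new ConcurrentLinkedQueue<>();

    // analogous to PollCondition.shouldBlock(): block only when nothing is buffered
    private boolean shouldBlock() {
        return completed.isEmpty();
    }

    // called by a background thread when a result arrives
    void complete(String result) {
        synchronized (completed) {
            completed.add(result);
            completed.notifyAll();
        }
    }

    String pollOnce(long timeoutMs) throws InterruptedException {
        if (shouldBlock()) {
            synchronized (completed) {
                if (completed.isEmpty())
                    completed.wait(timeoutMs); // wait up to timeoutMs for complete()
            }
        }
        return completed.poll(); // may be null if the timeout expired with no result
    }
}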

 

ConsumerNetworkClient.poll

/**
 * Poll for any network IO.
 * @param timeout timeout in milliseconds
 * @param now current time in milliseconds
 */
public void poll(long timeout, long now, PollCondition pollCondition) {
    // there may be handlers which need to be invoked if we woke up the previous call to poll
    firePendingCompletedRequests();

    synchronized (this) {
        // send all the requests we can send now
        trySend(now);

        // check whether the poll is still needed by the caller. Note that if the expected completion
        // condition becomes satisfied after the call to shouldBlock() (because of a fired completion
        // handler), the client will be woken up.
        if (pollCondition == null || pollCondition.shouldBlock()) {
            // if there are no requests in flight, do not block longer than the retry backoff
            if (client.inFlightRequestCount() == 0)
                timeout = Math.min(timeout, retryBackoffMs);
            client.poll(Math.min(MAX_POLL_TIMEOUT_MS, timeout), now);
            now = time.milliseconds();
        } else {
            client.poll(0, now);
        }

        // handle any disconnects by failing the active requests. note that disconnects must
        // be checked immediately following poll since any subsequent call to client.ready()
        // will reset the disconnect status
        checkDisconnects(now);

        // trigger wakeups after checking for disconnects so that the callbacks will be ready
        // to be fired on the next call to poll()
        maybeTriggerWakeup();

        // throw InterruptException if this thread is interrupted
        maybeThrowInterruptException();

        // try again to send requests since buffer space may have been
        // cleared or a connect finished in the poll
        trySend(now);

        // fail requests that couldn't be sent if they have expired
        failExpiredRequests(now);
    }

    // called without the lock to avoid deadlock potential if handlers need to acquire locks
    firePendingCompletedRequests();
}

 

trySend

private boolean trySend(long now) {
    // send any requests that can be sent now
    boolean requestsSent = false;
    for (Map.Entry<Node, List<ClientRequest>> requestEntry : unsent.entrySet()) { // send() above queued the requests into unsent
        Node node = requestEntry.getKey();
        Iterator<ClientRequest> iterator = requestEntry.getValue().iterator();
        while (iterator.hasNext()) {
            ClientRequest request = iterator.next();
            if (client.ready(node, now)) { // begin connecting to the given node; returns true if we are already connected and ready to send
                client.send(request, now); // hand the request to NetworkClient.send
                iterator.remove();
                requestsSent = true;
            }
        }
    }
    return requestsSent;
}

 

NetworkClient.send

/**
* Queue up the given request for sending. Requests can only be sent out to ready nodes.
* @param request The request
* @param now The current timestamp
*/
@Override
public void send(ClientRequest request, long now) {
doSend(request, false, now);
}

 

private void doSend(ClientRequest request, boolean isInternalRequest, long now) {
    String nodeId = request.destination();
    if (request.header().apiKey() == ApiKeys.API_VERSIONS.id) {
        if (!canSendApiVersionsRequest(nodeId))
            throw new IllegalStateException("Attempt to send API Versions request to node " + nodeId + " which is not ready.");
    } else if (!canSendRequest(nodeId))
        throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready.");

    Send send = request.body().toSend(nodeId, request.header());
    InFlightRequest inFlightRequest = new InFlightRequest(
            request.header(),
            request.createdTimeMs(),
            request.destination(),
            request.callback(),
            request.expectResponse(),
            isInternalRequest,
            send,
            now);

    this.inFlightRequests.add(inFlightRequest); // track it as an in-flight request
    selector.send(inFlightRequest.send);
}

Finally, selector.send is what hands the Send off to the network layer.

/**
 * Queue the given request for sending in the subsequent {@link #poll(long)} calls
 * @param send The request to send
 */
public void send(Send send) {
    String connectionId = send.destination();
    if (closingChannels.containsKey(connectionId))
        this.failedSends.add(connectionId);
    else {
        KafkaChannel channel = channelOrFail(connectionId, false); // look up this connection's KafkaChannel in Map<String, KafkaChannel> channels
        try {
            channel.setSend(send);
        } catch (CancelledKeyException e) {
            this.failedSends.add(connectionId);
            close(channel, false);
        }
    }
}

KafkaChannel.setSend

public void setSend(Send send) {
if (this.send != null)
throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress.");
this.send = send;
this.transportLayer.addInterestOps(SelectionKey.OP_WRITE);
}

As you can see, Selector.send also only attaches the Send to the channel (and registers interest in OP_WRITE);

the actual transmission has to wait until NetworkClient.poll is called.

In ConsumerNetworkClient.poll,

if (pollCondition == null || pollCondition.shouldBlock()) {
    // if there are no requests in flight, do not block longer than the retry backoff
    if (client.inFlightRequestCount() == 0)
        timeout = Math.min(timeout, retryBackoffMs);
    client.poll(Math.min(MAX_POLL_TIMEOUT_MS, timeout), now);
    now = time.milliseconds();
} else {
    client.poll(0, now);
}

if blocking is needed (or there is no pollCondition), it blocks for up to the timeout waiting for data;

otherwise it calls client.poll(0, now), which returns immediately whether or not data arrived.

NetworkClient.poll, which in turn delegates the actual I/O to Selector.poll:

@Override
public void poll(long timeout) throws IOException {
    if (timeout < 0)
        throw new IllegalArgumentException("timeout should be >= 0");

    clear();

    if (hasStagedReceives() || !immediatelyConnectedKeys.isEmpty())
        timeout = 0;

    /* check ready keys */
    long startSelect = time.nanoseconds();
    int readyKeys = select(timeout);
    long endSelect = time.nanoseconds();
    this.sensors.selectTime.record(endSelect - startSelect, time.milliseconds());

    if (readyKeys > 0 || !immediatelyConnectedKeys.isEmpty()) {
        pollSelectionKeys(this.nioSelector.selectedKeys(), false, endSelect);
        pollSelectionKeys(immediatelyConnectedKeys, true, endSelect);
    }

    addToCompletedReceives();

    long endIo = time.nanoseconds();
    this.sensors.ioTime.record(endIo - endSelect, time.milliseconds());

    // we use the time at the end of select to ensure that we don't close any connections that
    // have just been processed in pollSelectionKeys
    maybeCloseOldestConnection(endSelect);
}

 

select

private int select(long ms) throws IOException {
    if (ms < 0L)
        throw new IllegalArgumentException("timeout should be >= 0");

    if (ms == 0L)
        return this.nioSelector.selectNow();
    else
        return this.nioSelector.select(ms);
}

 

pollSelectionKeys

private void pollSelectionKeys(Iterable<SelectionKey> selectionKeys,
                               boolean isImmediatelyConnected,
                               long currentTimeNanos) {
    Iterator<SelectionKey> iterator = selectionKeys.iterator();
    while (iterator.hasNext()) {
        SelectionKey key = iterator.next();
        iterator.remove();
        KafkaChannel channel = channel(key);

        try {
            /* complete any connections that have finished their handshake (either normally or immediately) */
            if (isImmediatelyConnected || key.isConnectable()) {
                if (channel.finishConnect()) {
                    this.connected.add(channel.id());
                    this.sensors.connectionCreated.record();
                    SocketChannel socketChannel = (SocketChannel) key.channel();
                    log.debug("Created socket with SO_RCVBUF = {}, SO_SNDBUF = {}, SO_TIMEOUT = {} to node {}",
                            socketChannel.socket().getReceiveBufferSize(),
                            socketChannel.socket().getSendBufferSize(),
                            socketChannel.socket().getSoTimeout(),
                            channel.id());
                } else
                    continue;
            }

            /* if channel is not ready finish prepare */
            if (channel.isConnected() && !channel.ready())
                channel.prepare();

            /* if channel is ready read from any connections that have readable data */
            if (channel.ready() && key.isReadable() && !hasStagedReceive(channel)) {
                NetworkReceive networkReceive;
                while ((networkReceive = channel.read()) != null)
                    addToStagedReceives(channel, networkReceive);
            }

            /* if channel is ready write to any sockets that have space in their buffer and for which we have data */
            if (channel.ready() && key.isWritable()) {
                Send send = channel.write(); // this is where the data is actually written out
                if (send != null) {
                    this.completedSends.add(send);
                }
            }

            /* cancel any defunct sockets */
            if (!key.isValid())
                close(channel, true);
        } catch (Exception e) {
            // (exception handling elided in this excerpt)
        }
    }
}

 

Writing an application directly against raw NIO takes real courage.
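
For a feel of what that means, here is a stripped-down sketch of the same setSend/OP_WRITE deferral in raw NIO (illustrative only; real code must also handle connects, reads, and partial writes across many poll calls):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

class NioWriteSketch {
    // "setSend": stash the pending data on the key and express write interest
    static void setSend(SelectionKey key, ByteBuffer data) {
        key.attach(data);
        key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
    }

    // the data only goes out when select() reports the socket writable --
    // the same deferral Kafka's Selector.poll performs
    static void pollOnce(Selector selector) throws IOException {
        selector.select(1000); // block up to 1 s for ready keys
        for (SelectionKey key : selector.selectedKeys()) {
            if (key.isValid() && key.isWritable()) {
                SocketChannel channel = (SocketChannel) key.channel();
                ByteBuffer data = (ByteBuffer) key.attachment();
                channel.write(data); // may be a partial write
                if (!data.hasRemaining()) { // fully sent: stop watching OP_WRITE
                    key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE);
                    key.attach(null);
                }
            }
        }
        selector.selectedKeys().clear();
    }
}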
