下载剧本

下载后会在给定目录生成多个 PDF 文件,文件名为每一节的名称

#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# @Time   : 2019/11/18 10:48 PM
# @Author : yon
# @Email  : 2012@qq.com
# @File   : day1.py

import os
import re
import time
import logging

import pdfkit
import requests
from bs4 import BeautifulSoup


def gethtml(url):
    """Download one transcript page and render its <article> element to PDF.

    The PDF is written to a fixed output directory; its file name is the
    article's <h1> title with spaces removed.
    """
    filepath = '/home/yon/Desktop/pdf/'
    headers = {
        # 'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
        'Cache-Control': 'no-cache',
        'accept-encoding': 'gzip, deflate, br',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Referer': 'https://www.google.com/'
    }
    # timeout added so one stalled connection cannot hang the whole batch
    resp = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(resp.content, "html.parser")
    txt = soup.find("article")
    if txt is None or txt.h1 is None:
        # page layout changed or episode has no transcript — skip instead of
        # crashing with AttributeError on None
        logging.warning("no <article>/<h1> found at %s", url)
        return
    title = filepath + txt.h1.text.replace(" ", "") + ".pdf"
    # print(title)
    pdfkit.from_string(str(txt), title)


if __name__ == '__main__':
    # gethtml("https://www.thisamericanlife.org/664/transcript")
    for number in range(665, 687):
        urltoget = "https://www.thisamericanlife.org/" + str(number) + "/transcript"
        gethtml(urltoget)
        time.sleep(10)  # be polite to the server between downloads

下载MP3

对于不提供下载的剧集,可以先播放,然后打开开发者工具查看源代码,搜索 MP3,对该地址右键在新标签页打开即可下载

翻译


#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 12/31/19 11:06 AM
# @Author : yon
# @Email  : @qq.com
# @File   : tt.py

import random
import os
import re
import time
import logging

import pdfkit
import requests
from bs4 import BeautifulSoup
from selenium import webdriver


def translate(html):
    """Translate each HTML fragment with fanyi.baidu.com via Selenium and
    dump alternating original/translated lines to tt.html for later
    rendering by createpdf().
    """
    url = "https://fanyi.baidu.com"
    driver = webdriver.Chrome()
    driver.get(url)
    driver.refresh()
    # BUG FIX: the original did `jj = []` then `jj[0] = ...`, which raises
    # IndexError on an empty list — seed the list with the element instead.
    jj = ['<head><meta charset="UTF-8"></head>']
    try:
        for gg in html:
            inputtext = driver.find_element_by_class_name("textarea")
            inputtext.clear()
            inputtext.send_keys(gg)
            # random pause so the requests look less machine-generated
            time.sleep(random.uniform(2, 3))
            outtext = driver.find_element_by_class_name("target-output")
            jj.append(str(gg))
            jj.append(outtext.text)
    except Exception:
        print("出错了")
    finally:
        driver.close()
    with open("/home/baixiaoxu/桌面/pdf/tt.html", mode='w') as filename:
        for l in jj:
            filename.write(str(l))
            filename.write("\n")


def gethtml(url):
    """Fetch a transcript page and return {"title": ..., "content": [...]}.

    content is the list of <p> fragments, with each section's <h4> header
    inserted once before its first paragraph.
    """
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
        'Cache-Control': 'no-cache',
        'accept-encoding': 'gzip, deflate, br',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Referer': 'https://www.google.com/'
    }
    resp = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(resp.content, "html.parser")
    txt = soup.find("article")
    ss = BeautifulSoup(str(txt), "html.parser")
    title1 = txt.h1.text.replace(" ", "")
    ll = ss.findAll("p")
    temp = []
    temp_h4 = ''
    for x in ll:
        h4_temp = x.find_previous_sibling("h4")
        if h4_temp != temp_h4 and h4_temp is not None:
            # entering a new section: emit its <h4> once, then the paragraph
            temp.append(str(h4_temp))
            temp.append(str(x))
            temp_h4 = h4_temp
        else:
            temp.append(str(x))
    body1 = {"title": title1, "content": temp}
    return body1


def createpdf(title1):
    """Render the previously written tt.html to <title1>.pdf."""
    filepath = "/home/baixiaoxu/桌面/pdf/"
    # cc = BeautifulSoup(html, "html.parser")
    pdfkit.from_file("/home/baixiaoxu/桌面/pdf/tt.html", filepath + title1 + ".pdf")


if __name__ == '__main__':
    # url1 = "https://www.thisamericanlife.org/687/transcript"
    # contentdic = gethtml(url1)
    # title = contentdic["title"]
    # body = contentdic["content"]
    # translate(body)
    createpdf("ttttt")

正式版

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 12/31/19 11:06 AM
# @Author : yon
# @Email  : 2012@qq.com
# @File   : tt.py

import random
import os
import re
import time
import logging

import pdfkit
import requests
from bs4 import BeautifulSoup
from selenium import webdriver


def translate(html):
    """Translate each HTML fragment with fanyi.baidu.com via Selenium.

    Returns a single string: a UTF-8 <head> line followed by alternating
    original fragment / translated text, one item per line.
    """
    url = "https://fanyi.baidu.com"
    driver = webdriver.Chrome()
    driver.get(url)
    driver.refresh()
    jj = ['<head><meta charset="UTF-8"></head>']
    try:
        for gg in html:
            inputtext = driver.find_element_by_class_name("textarea")
            inputtext.clear()
            inputtext.send_keys(gg)
            # random pause so the requests look less machine-generated
            time.sleep(random.uniform(2, 3))
            outtext = driver.find_element_by_class_name("target-output")
            jj.append(str(gg))
            jj.append(outtext.text)
    except Exception:
        print("出错了")
    finally:
        driver.close()
    # single join instead of repeated `rr = rr + ...` (quadratic) in a loop
    return "".join(str(i) + "\n" for i in jj)


def gethtml(url):
    """Fetch a transcript page and return {"title": ..., "content": [...]}.

    content is the list of <p> fragments, with each section's <h4> header
    inserted once before its first paragraph.
    """
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
        'Cache-Control': 'no-cache',
        'accept-encoding': 'gzip, deflate, br',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Referer': 'https://www.google.com/'
    }
    resp = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(resp.content, "html.parser")
    txt = soup.find("article")
    ss = BeautifulSoup(str(txt), "html.parser")
    title1 = txt.h1.text.replace(" ", "")
    ll = ss.findAll("p")
    temp = []
    temp_h4 = ''
    for x in ll:
        h4_temp = x.find_previous_sibling("h4")
        if h4_temp != temp_h4 and h4_temp is not None:
            # entering a new section: emit its <h4> once, then the paragraph
            temp.append(str(h4_temp))
            temp.append(str(x))
            temp_h4 = h4_temp
        else:
            temp.append(str(x))
    body1 = {"title": title1, "content": temp}
    return body1


def createpdf(pdfhtml1, pdftitle):
    """Render the html string *pdfhtml1* to <pdftitle>.pdf."""
    filepath = "/home/baixiaoxu/桌面/pdf/"
    pdfkit.from_string(pdfhtml1, filepath + pdftitle + ".pdf")


if __name__ == '__main__':
    url1 = "https://www.thisamericanlife.org/689/transcript"
    contentdic = gethtml(url1)
    title = contentdic["title"]
    body = contentdic["content"]
    xx = translate(body)
    createpdf(xx, title)

腾讯翻译

腾讯翻译没有百度翻译好用,会有报错而退出,因此根据错误调整了 try 的位置

from selenium import webdriver
import random
import time
from bs4 import BeautifulSoup
import requests
import pdfkit


def translate(html):
    """Translate each HTML fragment with fanyi.qq.com via Selenium.

    Unlike the Baidu version, the try/except sits INSIDE the loop so one
    failing paragraph is marked "not translated" and the rest continue.
    Returns a single string: a UTF-8 <head> line followed by alternating
    original fragment / translated text, one item per line.
    """
    url = "https://fanyi.qq.com/"
    driver = webdriver.Chrome()
    driver.get(url)
    driver.refresh()
    html_translate_list = ['<head><meta charset="UTF-8"></head>']
    for gg in html:
        try:
            inputtext = driver.find_elements_by_class_name("textinput")[0]
            inputtext.clear()
            inputtext.send_keys(gg)
            # Tencent is slower to respond than Baidu — wait a bit longer
            time.sleep(random.uniform(4, 6))
            outtext = driver.find_element_by_class_name("text-dst")
            html_translate_list.append(str(gg))
            html_translate_list.append(outtext.text)
        except Exception:
            # keep the original text and a marker, then move on
            html_translate_list.append(str(gg))
            html_translate_list.append("not translated")
    driver.close()
    # single join instead of repeated string concatenation (quadratic)
    return "".join(str(i) + "\n" for i in html_translate_list)


def gethtml(url):
    """Fetch a transcript page and return {"title": ..., "content": [...]}.

    content is the list of <p> fragments, with each section's <h4> header
    inserted once before its first paragraph.
    """
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7',
        'Cache-Control': 'no-cache',
        'accept-encoding': 'gzip, deflate, br',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Referer': 'https://www.google.com/'
    }
    resp = requests.get(url, headers=headers, timeout=30)
    soup = BeautifulSoup(resp.content, "html.parser")
    txt = soup.find("article")
    ss = BeautifulSoup(str(txt), "html.parser")
    title1 = txt.find_all_next("h1")[0].text.replace(" ", "")
    ll = ss.findAll("p")
    temp = []
    temp_h4 = ''
    for x in ll:
        h4_temp = x.find_previous_sibling("h4")
        if h4_temp != temp_h4 and h4_temp is not None:
            # entering a new section: emit its <h4> once, then the paragraph
            temp.append(str(h4_temp))
            temp.append(str(x))
            temp_h4 = h4_temp
        else:
            temp.append(str(x))
    body1 = {"title": title1, "content": temp}
    return body1


def createpdf(pdfhtml1, pdftitle):
    """Render the html string *pdfhtml1* to <pdftitle>.pdf."""
    filepath = "/home/yon/Desktop/"
    pdfkit.from_string(pdfhtml1, filepath + pdftitle + ".pdf")


if __name__ == '__main__':
    url1 = "https://www.thisamericanlife.org/691/transcript"
    contentdic = gethtml(url1)
    title = contentdic["title"]
    body = contentdic["content"]
    xx = translate(body)
    createpdf(xx, title)

翻译api

pycharm license

K6IXATEF43-eyJsaWNlbnNlSWQiOiJLNklYQVRFRjQzIiwibGljZW5zZWVOYW1lIjoi5o6I5p2D5Luj55CG5ZWGOiBodHRwOi8vaWRlYS5oay5jbiIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiIiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9LHsiY29kZSI6IkFDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9LHsiY29kZSI6IkRQTiIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJQUyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJHTyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJETSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJDTCIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJSUzAiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUkMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUkQiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUEMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUk0iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiV1MiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiREIiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiREMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUlNVIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9XSwiaGFzaCI6IjEzMjkyMzQwLzAiLCJncmFjZVBlcmlvZERheXMiOjcsImF1dG9Qcm9sb25nYXRlZCI6ZmFsc2UsImlzQXV0b1Byb2xvbmdhdGVkIjpmYWxzZX0=-f8GvMiFGxAImRG8KKudyJDmZkDYD5fQiMOSFnBEMuAkeHjkq3rcj19hqQ1OS9nLCO4RvhRMINgYtKi3jVeZADAf6HKMnzDisWECB7ms8EgZoWOzTdKi3vw2pCpck5k6U6RXJmFlebIIbjA/KrzlPCPt9BfMZQ9NN5OdXDYXN9ZCvgG3vt5S0ZShPDNMQllSJt8OSerE1daj+nOP8f6WiUpgr
YkHwydzF/NBlejdjvkMZp3iCk+ylKhYW5OgfnChCwWEyEmmIaNj4xYyeL3WMLqHm82Uo3bQnKkUU8eO0WOmJPfO2NGrVIeM5SEl1iu8odKX4fes5u+duTRCKjbDLAg==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==

baidu api翻译正式

python 模块:

aiohttp==3.6.2
async-timeout==3.0.1
attrs==19.3.0
beautifulsoup4==4.8.2
bs4==0.0.1
certifi==2019.11.28
chardet==3.0.4
cssselect==1.1.0
idna==2.8
idna-ssl==1.1.0
lxml==4.4.2
multidict==4.7.5
pdfkit==0.6.1
Pillow==7.0.0
pymongo==3.10.1
PyPDF2==1.26.0
pyquery==1.4.1
redis==3.4.1
requests==2.22.0
selenium==3.141.0
soupsieve==1.9.5
typing-extensions==3.7.4.1
urllib3==1.25.7
yarl==1.4.2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 5:44 PM 2020-03-13
# @Author : yon
# @Email  : xx@qq.com
# @File   : baidutranslate

import http.client
import hashlib
import urllib.parse  # imported explicitly: urllib alone does not guarantee urllib.parse is loaded
import random
import json
import time

from pyquery import PyQuery as pq
import pdfkit


class baidu_Trans:
    """Thin client for the Baidu translation REST API (api.fanyi.baidu.com)."""

    def __init__(self):
        # one persistent HTTP connection reused across translate calls
        self.httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')

    def __del__(self):
        if self.httpClient:
            self.httpClient.close()

    def baidu_translate(self, word):
        """Translate *word* (source language auto-detected) to Chinese.

        Returns the translated string, or False on any failure (network
        error, bad credentials, malformed/error API response).
        """
        appid = ''  # 填写你的appid
        secretKey = ''  # 填写你的密钥
        myurl = '/api/trans/vip/translate'
        fromLang = 'auto'  # source language
        toLang = 'zh'  # target language
        salt = random.randint(32768, 65536)
        # per the Baidu API spec: sign = MD5(appid + query + salt + secret)
        sign = hashlib.md5((appid + word + str(salt) + secretKey).encode()).hexdigest()
        myurl = (myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(word)
                 + '&from=' + fromLang + '&to=' + toLang
                 + '&salt=' + str(salt) + '&sign=' + sign)
        try:
            time.sleep(1)  # stay under the API's queries-per-second limit
            self.httpClient.request('GET', myurl)
            response = self.httpClient.getresponse()
            result_all = response.read().decode("utf-8")
            result = json.loads(result_all)
            # an error response has no 'trans_result'; the resulting
            # TypeError is caught below and reported as False
            return result.get('trans_result')[0].get('dst')
        except Exception:
            return False

    def destory(self):
        # (sic) name kept for existing callers; closes the connection
        if self.httpClient:
            self.httpClient.close()


def american_life(url):
    """Download a transcript, append a Chinese translation after each <p>,
    and return {"title": ..., "html": ...} ready for PDF rendering.
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'https://cn.bing.com/',
        'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
    }
    doc = pq(url=url, headers=headers)
    article = doc('article')
    title = doc('h1').text().strip().replace(" ", "-")
    sb_baidu = baidu_Trans()
    for i in range(len(article('p'))):
        text = article('p').eq(i).text()
        print(text)
        translate = sb_baidu.baidu_translate(text)
        # <pre> with wrapping enabled so long translations don't overflow the PDF page
        taged_text = '<pre style="word-wrap:break-word;white-space: pre-wrap;">{}</pre>'.format(translate)
        print(translate)
        article('p').eq(i).append(taged_text)
    sb_baidu.destory()
    dic = {
        "title": title,
        "html": doc('article').html()
    }
    return dic


def create_to_pdf(url):
    """Fetch + translate *url*, then write the bilingual page as a PDF."""
    html_to_pdf = american_life(url)
    ddoc = '<head><meta charset="UTF-8"></head>{}'.format(html_to_pdf['html'])
    pdfkit.from_string(str(ddoc), "/home/baixiaoxu/desk/{}.pdf".format(html_to_pdf['title']))


if __name__ == '__main__':
    create_to_pdf("https://www.thisamericanlife.org/688/transcript")

下载Americanlife 语音材料的更多相关文章

  1. 通过脚本自动下载Esri会议材料

    在Esri的官网上,可以下载到Esri参加或者举办的各类会议的材料.官方地址为:http://proceedings.esri.com/library/userconf/index.html. 针对某 ...

  2. 【代码笔记】iOS-播放从网络上下载的语音

    代码: ViewController.m #import "ViewController.h" //录音 #import <AVFoundation/AVFoundation ...

  3. pypdf2:下载Americanlife网页生成pdf合并pdf并添加书签

    初步熟悉 安装 pip install pypdf2 合并并添加书签 #!/usr/bin/env python3.5 # -*- coding: utf-8 -*- # @Time : 2019/1 ...

  4. 如何利用Linux去油管下载带字幕的优质英文资料提升英文听力和词汇量

    非常方便地从油管下载你需要的任何英文视频资料,并且带字幕,方便你学习某个特定领域的词汇: [step1,Centos6系统安装youtbe-dl下载带英文字幕的视频] 1.首先需要安装youtube- ...

  5. php在web端播放amr语音(如微信语音)

    在使用微信JSSDK的上传下载语音接口时,发现一个问题: 下载的语音在iPhone上不能播放,测试了之后原因竟然是: 微信接口返回的音频内容是amr格式的,但iPhone不支持播放此类型格式. 那么转 ...

  6. freeswitch 中文语音

    1.下载中文语音包 链接:https://pan.baidu.com/s/1UODvqj8NAQw7_CRatfl0kg 提取码:qwdn 创建目录 /usr/local/freeswitch/sou ...

  7. Delphi百度语音【支持语音识别和语音合成】

    作者QQ:(648437169) 点击下载➨百度语音         语音识别api文档         语音合成api文档 [Delphi 百度语音]支持获取 Access Token.语音识别.语 ...

  8. 用Spring中的ResponseEntity文件批量压缩下载

    我看了很多网上的demo,先生成ZIP压缩文件,然后再下载. 我这里是生成ZIP文件流 进行下载.(核心代码没多少,就是一些业务代码) @RequestMapping(value = "/& ...

  9. 听VOA还不如学这些 (转自知乎恶魔奶爸)

    该专栏文章网址 http://zhuanlan.zhihu.com/aisapo/19634180 鉴于知乎无法插图片和音频,所以有了这篇教程集合,大家看这个就足够了其实 每次一学英语,材料无非就是V ...

随机推荐

  1. 石子合并2——区间DP【洛谷P1880题解】

    [区间dp让人头痛……还是要多写些题目练手,抽空写篇博客总结一下] 这题区间dp入门题,理解区间dp或者练手都很妙 ——题目链接—— (或者直接看下面) 题面 在一个圆形操场的四周摆放N堆石子,现要将 ...

  2. Elastic Search中mapping的问题

    Mapping在ES中是非常重要的一个概念.决定了一个index中的field使用什么数据格式存储,使用什么分词器解析,是否有子字段,是否需要copy to其他字段等.Mapping决定了index中 ...

  3. 【IntelliJ IDEA】tomcat启动,打印日志乱码问题 【最新解决方法请看最后附录】

    刚开始给idea上配置了一个tomcat,然后跟着http://wiki.jikexueyuan.com/project/intellij-idea-tutorial/theme-settings.h ...

  4. audio隐藏下载按钮

    // 这个方法只支持 Chrome 58+, 低于该版本的是没有无法隐藏的 <audio src="/i/horse.ogg" controls="controls ...

  5. 怎样在 Vue 中使用 v-model 实现双向数据绑定?

    1. 所谓 双向数据绑定, 可以理解为: 修改 A , B 会跟着被修改, 修改 B , A 会跟着被修改. 常用在需要 进行用户输入的地方, 比如 这些 html 标签:  input.select ...

  6. C#Linq之求和,平均值,最大值,最小值

    using System;using System.Collections.Generic;using System.Linq;using System.Text;using System.Threa ...

  7. 1 SQL SERVER 实现字符串分割成table的方法

    CREATE FUNCTION [dbo].[fn_SplitStringToTable] ( @p_Input VARCHAR(MAX), @p_Delimeter CHAR() = ',' ) R ...

  8. TCP/IP网络知识

    1.TCP/IP概念 TCP/IP不是单指一种传输协议,而是一组传输控制协议/互联网协议. 2.TCP/IP分层 (计算机网络中,实际应用的网络协议是TCP/IP协议族,TCP/IP的应用层大体上对应 ...

  9. Java并发与多线程

    1. 并发与并行 并发是指某个时间段内,多任务交替处理的能力:并行是指同时处理多任务的能力,多核CPU可以实现并行任务. 并发执行的特点: (1)并发程序间相互制约:程序执行结果的相互依赖以及共享资源 ...

  10. 【死磕 Java 集合】— ConcurrentSkipListMap源码分析

    转自:http://cmsblogs.com/?p=4773 [隐藏目录] 前情提要 简介 存储结构 源码分析 主要内部类 构造方法 添加元素 添加元素举例 删除元素 删除元素举例 查找元素 查找元素 ...