1. Removing stop words
In many cases, the way English characters and punctuation inside an article get segmented will not match your expectations, and unwanted tokens show up in the results. When that happens, you can use the following function to supply your own stop-word list and have those tokens removed.
jieba.analyse.set_stop_words("stop_words.txt")
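A minimal sketch of the workflow (the file name stop_words.txt and its sample contents are assumptions for illustration): the stop-word file is plain UTF-8 text with one token per line, and once registered, those tokens no longer appear in the extraction results.
import jieba.analyse

text = "今天的天氣真的很好"

# Hypothetical stop_words.txt, one token per line (UTF-8), e.g.:
#   真的
#   天氣
print(jieba.analyse.extract_tags(text, topK=5))  # before: unwanted tokens may appear

jieba.analyse.set_stop_words("stop_words.txt")   # register the custom stop-word list
print(jieba.analyse.extract_tags(text, topK=5))  # after: stop-listed tokens are dropped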
2. Custom IDF weights
jieba assigns every word an IDF weight. In many cases, though, you will want particular keywords in an article to stand out (or be played down); by giving those words a higher (or lower) IDF score, you can raise (or lower) their prominence in the results.
jieba.analyse.set_idf_path("idf.txt")  # load the custom IDF keyword weights
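A minimal sketch of the file format (the file name idf.txt and the sample words and values are assumptions for illustration): each line holds a word and its IDF value separated by a space, and a larger value pushes that word up the ranking.
import jieba.analyse

# Hypothetical idf.txt, one "word value" pair per line, e.g.:
#   效率 12.0
#   煩躁 1.0
jieba.analyse.set_idf_path("idf.txt")  # swap in the custom IDF table

text = "今天學(xué)習(xí)好煩躁,還沒有效率"
for keyword, weight in jieba.analyse.extract_tags(text, topK=5, withWeight=True):
    print(keyword, weight)  # 效率 should now rank above 煩躁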
A demo
from os import path
import jieba
import jieba.analyse

d = path.dirname(__file__)

# Load a user dictionary so domain-specific words are kept as single tokens
jieba.load_userdict(path.join(d, r"C:\Users\nsy\Desktop\userdict.txt.txt"))

text = "今天學(xué)習(xí)好煩躁,還沒有效率"
content = text

# Top 10 keywords, without weights
extracted_tags = jieba.analyse.extract_tags(content, topK=10, withWeight=False)
print(", ".join(extracted_tags))

# Remove stop words, then extract again with weights, keeping only
# place names (ns), nouns (n), verbal nouns (vn), and verbs (v)
jieba.analyse.set_stop_words(path.join(d, r"C:\Users\nsy\Desktop\stop_words.txt.txt"))
weighted_tags = jieba.analyse.extract_tags(content, topK=10, withWeight=True, allowPOS=('ns', 'n', 'vn', 'v'))
for keyword, weight in weighted_tags:
    print(f"keyword: {keyword}, weight: {weight}")
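A quick note on the two calls above: with withWeight=False, extract_tags returns a plain list of strings, while withWeight=True returns (keyword, weight) tuples, which is why the final loop can unpack each item. A minimal sketch of the difference:
import jieba.analyse

tags = jieba.analyse.extract_tags("今天學(xué)習(xí)好煩躁,還沒有效率", topK=3)
print(tags)   # a list of strings

pairs = jieba.analyse.extract_tags("今天學(xué)習(xí)好煩躁,還沒有效率", topK=3, withWeight=True)
print(pairs)  # a list of (keyword, weight) tuples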
3. Ranking the most frequent tokens (occurrence counts)
import sys
from os import path
import jieba
import jieba.analyse

d = path.dirname(__file__)

# Open the file according to the Python version
if sys.version_info > (3, 0):
    text = open(path.join(d, r"C:\Users\nsy\Desktop\test.txt"), 'r', encoding='utf-8').read()
else:
    text = open(path.join(d, r"C:\Users\nsy\Desktop\test.txt"), 'r').read()
text = text.replace('\n', '')

# Set the stop-word file path; check that the file name is correct
jieba.analyse.set_stop_words(r"C:\Users\nsy\Desktop\stop_words.txt.txt")

# Print the segmentation result, then a separator line
print(" ".join(jieba.cut(text)))
print("-" * 10)

# Use the custom user dictionary
jieba.load_userdict(path.join(d, r"C:\Users\nsy\Desktop\userdict.txt.txt"))

# Count token frequencies in a dictionary
dic = {}
for ele in jieba.cut(text):
    if ele not in dic:
        dic[ele] = 1
    else:
        dic[ele] += 1
# Sort by frequency (descending) and print
for w in sorted(dic, key=dic.get, reverse=True):
    print("%s %d" % (w, dic[w]))
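As a side note, the counting loop above can be written more compactly with the standard library's collections.Counter; a minimal sketch, assuming text already holds the file contents:
from collections import Counter
import jieba

counts = Counter(jieba.cut(text))        # token -> number of occurrences
for word, freq in counts.most_common():  # already sorted in descending order
    print(word, freq)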
4. Using jieba to analyze the main topics of a web article
import sys
import urllib.request as httplib
import jieba
import jieba.analyse

contents = ""

# Error handling for the network request
try:
    # URL of the online article
    url = "https://csdnnews.blog.csdn.net/article/details/140678511?spm=1000.2115.3001.5928"
    # Build the request
    req = httplib.Request(url)
    # Open the page
    response = httplib.urlopen(req)
    # Connection succeeded (200)
    if response.status == 200:
        if sys.version_info > (3, 0):
            # Read the page data and decode it; fall back to UTF-8 if no charset is declared
            contents = response.read().decode(response.headers.get_content_charset() or 'utf-8')
        else:
            # Python 2 is no longer in use, so it is not supported here
            raise Exception("Python 2 is not supported")
except Exception as e:
    print("Error during HTTP request:", e)
    contents = ""

# Remove unwanted words
jieba.analyse.set_stop_words("C:\\Users\\nsy\\Desktop\\stop_words.txt.txt")

# Keep only place names (ns), nouns (n), and verbal nouns (vn)
keywords = jieba.analyse.extract_tags(contents, topK=5, withWeight=True, allowPOS=('ns', 'n', 'vn'))

# Print each keyword with its weight
for item in keywords:
    print("%s=%f" % (item[0], item[1]))
print("*" * 40)

# Dictionary of key:value pairs for the word counts
dic = {}
# Segment the article
words = jieba.cut(contents)
for word in words:
    if word not in dic:
        dic[word] = 1   # first occurrence
    else:
        dic[word] += 1  # add 1
# Sort from most to least frequent and print
for w in sorted(dic.items(), key=lambda x: x[1], reverse=True):
    print("%s: %d" % w)
# Note: exception handling should wrap the specific operation, not sit at the end of the code
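For comparison, jieba also ships a TextRank-based extractor (jieba.analyse.textrank) that ranks words by co-occurrence in a graph rather than by TF-IDF, so it needs no IDF table; a minimal sketch, assuming contents holds the downloaded article text:
import jieba.analyse

# TextRank keyword extraction with the same POS filter as above
for keyword, weight in jieba.analyse.textrank(contents, topK=5, withWeight=True, allowPOS=('ns', 'n', 'vn')):
    print("%s=%f" % (keyword, weight))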