基于xpath選擇器、PyQuery、正則表達式的格式清理工具詳解
這篇文章主要介紹了基于xpath選擇器、PyQuery、正則表達式的格式清理工具,本文給大家介紹的非常詳細,對大家的學習或工作具有一定的參考借鑒價值,需要的朋友可以參考下
1,使用xpath清理不必要的標簽元素,以及無內容標簽
import re

from lxml import etree
from loguru import logger


def xpath_clean(self, text: str, xpath_dict: dict) -> str:
    '''
    Remove unwanted elements (and elements with no text content) via XPath.

    :param text: html_content to clean
    :param xpath_dict: extra XPath expressions whose matches are removed
    :return: string type html_content
    '''
    # Copy so the caller's dict is not mutated by the update() below.
    remove_by_xpath = dict(xpath_dict) if xpath_dict else dict()
    # Tags that are nearly always noise in article bodies, removed
    # unconditionally except in extreme cases.
    remove_by_xpath.update({
        '_remove_2': '//iframe',
        '_remove_4': '//button',
        '_remove_5': '//form',
        '_remove_6': '//input',
        '_remove_7': '//select',
        '_remove_8': '//option',
        '_remove_9': '//textarea',
        '_remove_10': '//figure',
        '_remove_11': '//figcaption',
        '_remove_12': '//frame',
        '_remove_13': '//video',
        '_remove_14': '//script',
        '_remove_15': '//style'
    })
    parser = etree.HTMLParser(remove_blank_text=True, remove_comments=True)
    selector = etree.HTML(text, parser=parser)
    # Routine removal: drop every node matched by the configured expressions.
    for xpath in remove_by_xpath.values():
        for bad in selector.xpath(xpath):
            bad_string = etree.tostring(bad, encoding='utf-8',
                                        pretty_print=True).decode()
            logger.debug(f"clean article content : {bad_string}")
            bad.getparent().remove(bad)
    # Structural tags that may legitimately be empty (images, table parts).
    skip_tip = "name()='img' or name()='tr' or " \
               "name()='th' or name()='tbody' or " \
               "name()='thead' or name()='table'"
    # Check every remaining element; delete those with no text content.
    for p in selector.xpath(f"//*[not({skip_tip})]"):
        # Skip nodes that contain a structural descendant or real text.
        if p.xpath(f".//*[{skip_tip}]") or \
                bool(re.sub(r'\s', '', p.xpath('string(.)'))):
            continue
        bad_p = etree.tostring(p, encoding='utf-8',
                               pretty_print=True).decode()
        logger.debug(f"clean p tag : {bad_p}")
        p.getparent().remove(p)
    return etree.tostring(selector, encoding='utf-8',
                          pretty_print=True).decode()
2,使用pyquery清理標簽屬性,并返回處理后源碼和純凈文本
#!/usr/bin/env python
# -*-coding:utf-8-*-
from loguru import logger
from pyquery import PyQuery as pq


def pyquery_clean(self, text, url, pq_dict) -> object:
    '''
    pyquery pass: remove configured selectors, strip attributes and
    absolutize image links.

    :param text: html_content
    :param url: base url used to complete relative links
    :param pq_dict: selectors whose matches are removed
    :return: (pure text, html) tuple
    '''
    # Selectors whose matches are deleted outright.
    remove_by_pq = pq_dict if pq_dict else dict()
    # Attribute whitelist — table layout attributes must survive.
    attr_white_list = ['rowspan', 'colspan']
    # Attribute names that may carry the real image url (lazy loading).
    img_key_list = ['src', 'data-echo', 'data-src', 'data-original']
    # Build the pyquery DOM.
    dom = pq(text)
    # Remove junk selectors, logging each removed fragment.
    for bad_tag in remove_by_pq.values():
        for bad in dom(bad_tag):
            bad_string = pq(bad).html()
            logger.debug(f"clean article content : {bad_string}")
        dom.remove(bad_tag)
    # Attribute handling for every tag.
    for tag in dom('*'):
        # list() copy: attributes are deleted while iterating.
        for key, value in list(tag.attrib.items()):
            # Keep table rowspan/colspan attributes.
            if key in attr_white_list:
                continue
            # Image link: complete a relative url, then normalize onto src.
            if key in img_key_list:
                img_url = self.absolute_url(url, value)
                pq(tag).remove_attr(key)
                pq(tag).attr('src', img_url)
                pq(tag).attr('alt', '')
            # img alt attribute is kept but blanked.
            elif key == 'alt':
                pq(tag).attr(key, '')
            # Every other attribute is dropped.
            else:
                pq(tag).remove_attr(key)
    return dom.text(), dom.html()
3,正則表達清理空格以及換行符內容
#!/usr/bin/env python
# -*-coding:utf-8-*-
import re


def regular_clean(self, str1: str, str2: str):
    '''
    Final regex normalization of content and html_content.

    :param str1: content (pure text)
    :param str2: html_content
    :return: processed (content, html_content)
    '''
    def new_line(text):
        # Normalize <br> variants, strip inline formatting tags and
        # rewrite headings to paragraphs.
        text = re.sub(r'<br\s?/?>', '<br>', text)
        text = re.sub(
            r'</?a>|</?em>|</?html>|</?body>|'
            r'</?head>|<[a-zA-Z]{1,10}\s?/>|'
            r'</?strong>|</?blockquote>|</?b>|'
            r'</?span>|</?i>|</?hr>|</?font>',
            '',
            text)
        text = re.sub(r'\n', '', text)
        text = re.sub(r'<h[1-6]>', '<p>', text)
        text = re.sub(r'</h[1-6]>', '</p>', text)
        # One paragraph per line; self-closing <br/> form.
        text = text.replace('</p>', '</p>\n').replace('<br>', '<br/>')
        return text

    str1, str2 = self.clean_blank(str1), self.clean_blank(str2)  # TODO handle blank-line edge cases
    # TODO html_content: 1) drop unusable/display-breaking tags 2) fix newlines
    str2 = new_line(text=str2)
    return str1, str2
結尾部分,各個方法封裝類代碼展示
#!/usr/bin/env python
# -*-coding:utf-8-*-
'''
author: szhan
date: 2020-08-17
summary: 清理html_content以及獲取純凈數據格式
'''
import re
from urllib.parse import urlsplit, urljoin

from loguru import logger
from lxml import etree
from pyquery import PyQuery as pq
class CleanArticle:
    """Clean raw html_content and produce (pure text, cleaned html)."""

    def __init__(
            self,
            text: str,
            url: str = '',
            xpath_dict: dict = None,
            pq_dict: dict = None
    ):
        # Raw html content to be cleaned.
        self.text = text
        # Base url, used to absolutize relative image links.
        self.url = url
        # Extra XPath expressions whose matches must be removed.
        self.xpath_dict = xpath_dict or dict()
        # Extra pyquery selectors whose matches must be removed.
        self.pq_dict = pq_dict or dict()

    @staticmethod
    def absolute_url(baseurl: str, url: str) -> str:
        '''
        Complete a possibly-relative url.

        :param baseurl: scheme url
        :param url: target url
        :return: complete url
        '''
        target_url = url if urlsplit(url).scheme else urljoin(baseurl, url)
        return target_url

    @staticmethod
    def clean_blank(text):
        '''
        Remove whitespace noise: spaces, ideographic spaces, tabs,
        non-breaking spaces and repeated blank lines.

        :param text: input string
        :return: compacted string
        '''
        text = text.replace(' ', '').replace('\u3000', '').replace('\t', '').replace('\xa0', '')
        text = re.sub(r'\s{2,}', '', text)
        text = re.sub(r'\n{2,}', '\n', text)
        text = text.strip('\n').strip()
        return text

    def run(self):
        '''
        :return: cleaned (content, html_content)
        :raises ValueError: when self.text is empty or not a str
        '''
        if (not bool(self.text)) or (not isinstance(self.text, str)):
            raise ValueError('html_content has a bad type value')
        # Step 1: xpath pass removes comments plus iframe, button, form,
        # script, style, video and similar tags.
        text = self.xpath_clean(self.text, self.xpath_dict)
        # Step 2: pyquery pass handles attributes and image links.
        str1, str2 = self.pyquery_clean(text, self.url, self.pq_dict)
        # Step 3: final regex normalization.
        content, html_content = self.regular_clean(str1, str2)
        return content, html_content

    def xpath_clean(self, text: str, xpath_dict: dict) -> str:
        '''
        Remove unwanted elements (and elements with no text content) via XPath.

        :param text: html_content
        :param xpath_dict: extra XPath expressions whose matches are removed
        :return: string type html_content
        '''
        # Copy so the caller's dict is not mutated by the update() below.
        remove_by_xpath = dict(xpath_dict) if xpath_dict else dict()
        # Tags that are nearly always noise in article bodies.
        remove_by_xpath.update({
            '_remove_2': '//iframe',
            '_remove_4': '//button',
            '_remove_5': '//form',
            '_remove_6': '//input',
            '_remove_7': '//select',
            '_remove_8': '//option',
            '_remove_9': '//textarea',
            '_remove_10': '//figure',
            '_remove_11': '//figcaption',
            '_remove_12': '//frame',
            '_remove_13': '//video',
            '_remove_14': '//script',
            '_remove_15': '//style'
        })
        parser = etree.HTMLParser(remove_blank_text=True, remove_comments=True)
        selector = etree.HTML(text, parser=parser)
        # Routine removal: drop every node matched by the expressions.
        for xpath in remove_by_xpath.values():
            for bad in selector.xpath(xpath):
                bad_string = etree.tostring(bad, encoding='utf-8',
                                            pretty_print=True).decode()
                logger.debug(f"clean article content : {bad_string}")
                bad.getparent().remove(bad)
        # Structural tags that may legitimately be empty.
        skip_tip = "name()='img' or name()='tr' or " \
                   "name()='th' or name()='tbody' or " \
                   "name()='thead' or name()='table'"
        # Check every remaining element; delete those with no text content.
        for p in selector.xpath(f"//*[not({skip_tip})]"):
            # Skip nodes containing structural descendants or real text.
            if p.xpath(f".//*[{skip_tip}]") or \
                    bool(re.sub(r'\s', '', p.xpath('string(.)'))):
                continue
            bad_p = etree.tostring(p, encoding='utf-8',
                                   pretty_print=True).decode()
            logger.debug(f"clean p tag : {bad_p}")
            p.getparent().remove(p)
        return etree.tostring(selector, encoding='utf-8',
                              pretty_print=True).decode()

    def pyquery_clean(self, text, url, pq_dict) -> object:
        '''
        pyquery pass: remove configured selectors, strip attributes and
        absolutize image links.

        :param text: html_content
        :param url: base url used to complete relative links
        :param pq_dict: selectors whose matches are removed
        :return: (pure text, html) tuple
        '''
        # Selectors whose matches are deleted outright.
        remove_by_pq = pq_dict if pq_dict else dict()
        # Attribute whitelist — table layout attributes must survive.
        attr_white_list = ['rowspan', 'colspan']
        # Attribute names that may carry the real image url (lazy loading).
        img_key_list = ['src', 'data-echo', 'data-src', 'data-original']
        # Build the pyquery DOM.
        dom = pq(text)
        # Remove junk selectors, logging each removed fragment.
        for bad_tag in remove_by_pq.values():
            for bad in dom(bad_tag):
                bad_string = pq(bad).html()
                logger.debug(f"clean article content : {bad_string}")
            dom.remove(bad_tag)
        # Attribute handling for every tag.
        for tag in dom('*'):
            # list() copy: attributes are deleted while iterating.
            for key, value in list(tag.attrib.items()):
                # Keep table rowspan/colspan attributes.
                if key in attr_white_list:
                    continue
                # Image link: complete a relative url, normalize onto src.
                if key in img_key_list:
                    img_url = self.absolute_url(url, value)
                    pq(tag).remove_attr(key)
                    pq(tag).attr('src', img_url)
                    pq(tag).attr('alt', '')
                # img alt attribute is kept but blanked.
                elif key == 'alt':
                    pq(tag).attr(key, '')
                # Every other attribute is dropped.
                else:
                    pq(tag).remove_attr(key)
        return dom.text(), dom.html()

    def regular_clean(self, str1: str, str2: str):
        '''
        Final regex normalization of content and html_content.

        :param str1: content (pure text)
        :param str2: html_content
        :return: processed (content, html_content)
        '''
        def new_line(text):
            # Normalize <br> variants, strip inline formatting tags and
            # rewrite headings to paragraphs.
            text = re.sub(r'<br\s?/?>', '<br>', text)
            text = re.sub(
                r'</?a>|</?em>|</?html>|</?body>|'
                r'</?head>|<[a-zA-Z]{1,10}\s?/>|'
                r'</?strong>|</?blockquote>|</?b>|'
                r'</?span>|</?i>|</?hr>|</?font>',
                '',
                text)
            text = re.sub(r'\n', '', text)
            text = re.sub(r'<h[1-6]>', '<p>', text)
            text = re.sub(r'</h[1-6]>', '</p>', text)
            # One paragraph per line; self-closing <br/> form.
            text = text.replace('</p>', '</p>\n').replace('<br>', '<br/>')
            return text

        str1, str2 = self.clean_blank(str1), self.clean_blank(str2)  # TODO handle blank-line edge cases
        # TODO html_content: 1) drop unusable/display-breaking tags 2) fix newlines
        str2 = new_line(text=str2)
        return str1, str2
if __name__ == '__main__':
    # Read the sample page and run the cleaner end to end.
    with open('html_content.html', 'r', encoding='utf-8') as f:
        html = f.read()
    ca = CleanArticle(text=html)
    _, html_content = ca.run()
    print(html_content)
到此這篇關于基于xpath選擇器、PyQuery、正則表達式的格式清理工具詳解的文章就介紹到這了,更多相關PyQuery、正則表達式的格式清理工具內容請搜索腳本之家以前的文章或繼續瀏覽下面的相關文章,希望大家以后多多支持腳本之家!
來(lái)源:(′?`)腳本之家
鏈接:https://www.jb51??.net/article/194906.htm




