【发布时间】:2025-11-21 07:25:03
【问题描述】:
我正在编写一些代码来从网站上抓取段落。虽然代码有点混乱和混乱,但我认为它仍然清晰易读。
唯一的问题是我在编写它时遇到了一个大障碍。当段落被写出时,它们似乎与该页面中的段落相连,我希望每个段落作为其单独的列表元素,而不是主列表中较小列表的一部分。
我想要的输出是与查询最相关的顶部段落。我已经有了所有的东西来检查哪些段落对查询的可靠性,但是就像我说的那样,当我将一页中的所有段落写到一个 txt 文件中进行检查时,它们似乎都组合在一起了。
这是我的代码:
#qresultsl is a list of links
for xa in range(0, qresultsl):
URL=ALLresults[xa].format()
URL=str(URL)
URL=URL.replace("'","")
URL=URL.replace("[","")
URL=URL.replace("]","")
pageURL=URL
try:
pr=requests.get(pageURL, headers=headers)
except:
print("Couldn't scrape ",pageURL)
continue
if pr.status_code==200:
try:
psoup=BeautifulSoup(pr.text, 'html.parser')
paragraphs=[''.join(s.findAll(text=True))for s in
psoup.findAll('p')]
presults.append(paragraphs)
except:
print("Couldn't scrape ", pageURL)
continue
else:
print("Couldn't scrape ",pageURL)
continue
# Results
print("\r")
print(len(presults)," websites scraped of ", numresults)
print(len(presults)," pages of content ready for next phase of processing.")
paraList = []
for i in presults:
#make all keywords one big list
paraList = sum(presults, [])
presults = paraList
cleanparagraphs=[]
rangenum=len(presults)
print(presults)
def cleanresults():
ct=0
for dd in range(0,rangenum):
cleaned=presults[ct]
cleaned=str(cleaned)
cleaned=cleaned.replace("/","")
cleaned=cleaned.replace("]","")
cleaned=cleaned.replace("[","")
cleaned=cleaned.replace("'","")
cleaned=cleaned.replace("\n","")
NEWITEM=cleaned
ct=ct+1
cleanparagraphs.append(NEWITEM)
cleanresults()
presults = cleanparagraphs
paragraphs = []
for z in range(len(presults)):
pagei=presults[z]
pagei=str(pagei)
pagei=pagei.replace("[","")
pagei=pagei.replace("'","")
pagei=pagei.replace("]","")
pageHtml = pagei #i wasn't "stupid".
paragraphs.append(presults[z])
'''with open('paragraphs.txt', 'r') as f:
paragraphs = ast.literal_eval(f.read())
'''
'''for i in paragraphs:
#make all paragraphs one big list
paragraphs = sum(paragraphs, [])'''
resultspara=[]
for le in paragraphs:
if le not in resultspara:
resultspara.append(le)
paragraphs=resultspara
og=len(presults)
nl=len(paragraphs)
removed=og-nl
print(removed, " duplicates removed")
lst = []
cp=0
for para in paragraphs:
lst.append(paragraphs[cp].lower())
cp=cp+1
rem=str(lst)
rem=rem.replace("\r","")
rem=rem.replace("\n","")
rem=rem.replace('"\r\n','')
final_list=rem.split("#####")
phrase1 = query
phrase2 = query2
phrase3 = query3
phrase4 = query4
paragraphs=final_list
ammntRemoved = 0
for i in paragraphs:
if len(i) < 20:
paragraphs.remove(i)
ammntRemoved = ammntRemoved + 1
print("removed " + str(ammntRemoved) + " small lines")
randomVariable = []
for i in paragraphs:
randomVariable = sum(paragraphs, [])
paragraphs = randomVariable
def getRelated(phrase):
splitWords = phrase.split() #split the sentence for proccessing
associatedWords = [splitWords] #add spitwords to associatedWords; associatedWords will be the main variable to add processed words to
finalWords = [] #created the variable that will have the final parsde and deduped list
for word in splitWords:
#get associated words for each word in the phrase
html = requests.get("https://api.wordassociations.net/associations/v1.0/json/search?apikey=8c124543-3a0d-4ac9-b6b4-cda92d7d1411&text="+ word + "&lang=en")
theJson = html.text
source = json.loads(theJson)
try:
associatedWords.append([source["response"][0]["items"][0]["item"],source["response"][0]["items"][1]["item"],source["response"][0]["items"][2]["item"],source["response"][0]["items"][3]["item"],source["response"][0]["items"][4]["item"],source["response"][0]["items"][5]["item"]])
numass=len(associatedWords)
print(numass, " associations found for ", word)
except:
print("tested word - " + word + " - had no asocciations")
for i in associatedWords:
#make all keywords one big list
finalWords = sum(associatedWords, [])
relatedKeywords = []
for word in finalWords:
#make finalwords lowercase
relatedKeywords.append(word.lower())
return finalWords
#took that out and replaced it with a for loop that does them all beforehand
phrase1 = getRelated(query)
phrase2 = getRelated(query2)
phrase3 = getRelated(query3)
phrase4 = getRelated(query4)
topic = {}
subHead1 = {}
subHead2 = {}
subHead3 = {}
def getGoodParagraphs(keywords, dictionary):
global length
for para in paragraphs:
#get the keyword frequencies in each paragraph
x = 0
for keyword in keywords:
added = para.count(keyword)
x = x + added
dictionary[para] = x
#get the lengths of used paragraphs
length = 0
length1 = 0
length2 = 0
length3 = 0
def getLen(lengthVar, dictionary):
for i in range(100):
try:
#count the number of words
lengthVar = lengthVar + len(dictionary[i].split())
except:
#break if theres no more paragraphs in said list
break
getGoodParagraphs(phrase1, topic)
getGoodParagraphs(phrase2, subHead1)
getGoodParagraphs(phrase3, subHead2)
getGoodParagraphs(phrase4, subHead3)
getLen(length, topic)
getLen(length1, subHead1)
getLen(length2, subHead2)
getLen(length3, subHead3)
#sort paragraphs least to greatest
topic = sorted(topic, key=lambda k: topic[k], reverse=True)
subHead1 = sorted(subHead1, key=lambda k: subHead1[k], reverse=True)
subHead2 = sorted(subHead2, key=lambda k: subHead2[k], reverse=True)
subHead3 = sorted(subHead3, key=lambda k: subHead3[k], reverse=True)
def appendTop10(inputList, outputList):
try:
for i in range(3):
outputList.append(inputList[i])
except:
print("> Wasnt able to append all 3 paragraphs")
finalTopic = []
finalSubHead1 = []
finalSubHead2 = []
finalSubHead3 = []
appendTop10(topic, finalTopic)
appendTop10(subHead1, finalSubHead1)
appendTop10(subHead2, finalSubHead2)
appendTop10(subHead3, finalSubHead3)
with open("article.txt", "w") as outputFile:
count=0
count2=0
count3=0
count4=0
for i in finalTopic:
filename = 'text.txt'
with open(filename, mode="w") as outfile: # also, tried mode="rb"
for s in finalTopic:
outfile.write("%s\n" % s)
for s in finalSubHead1:
outfile.write("%s\n" % s)
for s in finalSubHead2:
outfile.write("%s\n" % s)
for s in finalSubHead3:
outfile.write("%s\n" % s)
print("DONE")
没有对每个类别的顶部段落进行排序,我做错了什么?
我急需帮助,提前致谢
【问题讨论】:
-
你能把代码剪成一个最小的可重现的例子吗?这包含很多不必要的额外内容
-
对不起,我把一些东西剪掉了
-
谢谢,不用担心
-
我剪掉了很多顶部,从那时起我尝试清理段落并处理它们
-
@ironkey
URL值是什么
标签: python python-3.x list beautifulsoup python-requests