【发布时间】:2020-03-10 14:22:21
【问题描述】:
我正在将文本附加到列表中,然后将文本更改为词嵌入,然后进行机器学习。 'articles' 中的 'insts' 是使用 spacy 收集的,但后来我遇到了这个错误,如下所示,有人可以告诉我如何解决这个错误吗?我可以将类型“spacy.tokens.doc.Doc”更改为“str”吗?
def main(annotations_file, max_insts=-1):
    """Read annotated articles, collect their title texts and labels,
    load GloVe embeddings, and fit a Keras Tokenizer on the texts.

    Args:
        annotations_file: path to the annotation corpus read by ``reader``.
        max_insts: cap on instances passed to ``reader.read_corpus``
            (-1 means no limit).
    """
    articles = reader.read_corpus(annotations_file, max_insts=max_insts)
    texts = []
    random.seed(5)
    random.shuffle(articles)
    sect = list()
    label_bef = list()
    label_dur = list()
    label_aft = list()
    for insts in articles:
        for inst in insts:
            # BUG FIX: ``article_title_doc`` is a spacy.tokens.doc.Doc, and
            # Keras' Tokenizer.fit_on_texts calls .lower() on each element,
            # which Doc does not implement. Convert to plain text first.
            texts.append(inst.possessor.doc._.article_title_doc.text)
            # sect.append(inst.possessor.doc._.section_title_doc)
            label_bef.append(inst.labels['BEF'])
            label_dur.append(inst.labels['DUR'])
            label_aft.append(inst.labels['AFT'])
    # Build the word -> vector lookup from the pre-trained GloVe file.
    embeddings_index = {}
    with open('glove.6B.100d.txt') as f:
        for line in f:
            word, coefs = line.split(maxsplit=1)
            coefs = np.fromstring(coefs, 'f', sep=' ')
            embeddings_index[word] = coefs
    tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
    tokenizer.fit_on_texts(texts)  # now receives plain strings, not Doc objects
    word_index = tokenizer.word_index
Traceback (most recent call last):
File "sample.py", line 117, in <module>
main(args.ANNOTATIONS_FILE, args.max_articles)
File "sample.py", line 51, in main
tokenizer.fit_on_texts(texts)
File "/home/huweilong/miniconda3/envs/nre/lib/python3.6/site-packages/keras_preprocessing/text.py", line 223, in fit_on_texts
self.split)
File "/home/huweilong/miniconda3/envs/nre/lib/python3.6/site-packages/keras_preprocessing/text.py", line 43, in text_to_word_sequence
text = text.lower()
AttributeError: 'spacy.tokens.doc.Doc' object has no attribute 'lower'
【问题讨论】:
-
可以直接用 `doc.text` 把 `spacy.tokens.doc.Doc` 转成普通字符串（例如 `texts.append(inst.possessor.doc._.article_title_doc.text)`）；如果需要小写文本，也可以用 `doc.text.lower()` 或对单个 token 使用 `token.lower_`。Keras 的 `Tokenizer.fit_on_texts` 要求传入字符串列表，而不是 Doc 对象。