【问题标题】:RuntimeWarning , RuntimeError (Python Al Chat Bot on Discord Server)RuntimeWarning , RuntimeError (Discord 服务器上的 Python Al Chat Bot)
【发布时间】:2022-01-16 08:15:44
【问题描述】:

我的目标:能够整合 Al Chatbot 和 Discord

import nltk

nltk.download('punkt')

from nltk.stem.lancaster import LancasterStemmer
stemmer=LancasterStemmer()
import numpy
import tflearn
import tensorflow
import random
import json
import pickle
import nest_asyncio
import asyncio
#---------------------------------------------------------------------------
import discord 
import os





with open("intents.json") as file:
     data=json.load(file)
     print(data['intents'])
     
try:
    with open("data.pickle","rb") as f:
        words,labels,training,output=pickle.load(f)
except: 
    words=[]
    labels=[]
    docs_x=[]
    docs_y=[]
    
    for intent in data['intents']:
        for pattern in intent['patterns']:
            wrds=nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])
             
            
            if intent["tag"] not in labels:
               labels.append(intent["tag"])
               
               
    #remove duplicate          
    words=[stemmer.stem(w.lower()) for w in words if w != "?"]
    
    words=sorted(list(set(words)))
    
    labels=sorted(labels)
     
    
    training=[] 
    output=[]
    
    out_empty=[0 for _ in range(len(labels))]
    
    for x, doc in enumerate(docs_x):
        bag=[]
        wrds=[stemmer.stem(w) for w in doc]
        
        for w in words:
          if w in wrds:
             bag.append(1)
          else:
             bag.append(0)
             
        output_row=out_empty[:]
             
        output_row[labels.index(docs_y[x])]=1
        
        training.append(bag)     
        output.append(output_row)
        
    training=numpy.array(training)
    output=numpy.array(output)
    
    with open("data.pickle","wb") as f:
        pickle.dump((words,labels,training,output),f)

tensorflow.compat.v1.reset_default_graph()

net=tflearn.input_data(shape=[None,len(training[0])])
net=tflearn.fully_connected(net,16) 
net=tflearn.fully_connected(net,16)
net=tflearn.fully_connected(net,len(output[0]),activation="softmax")
net=tflearn.regression(net)

model=tflearn.DNN(net)

model.fit(training, output,n_epoch=10000,batch_size=16,show_metric=True )    

model.save('C:/Users/Desktop/chatbot/model/model.tflearn')
model.load('C:/Users/Desktop/chatbot/model/model.tflearn')

    





def bag_of_words(s,words):

   bag=[0 for _ in range(len(words))]
   s_words=nltk.word_tokenize(s)
   s_words=[stemmer.stem(word.lower()) for word in s_words]

 
   for se in s_words:
       for i,w in enumerate(words):
           if w==se:
              bag[i]=1

   return numpy.array(bag)



def chat():
    print("start talking with the bot (type quit to stop!")
    while True:
        inp=input("You:")
        if inp.lower()=="quit":
           break
       
        results= model.predict([bag_of_words(inp,words)])[0]
        # print("results:",results)
       
        results_index=numpy.argmax(results)
        
        if results[results_index]>0.7:
            
                
            tag=labels[results_index]
            print("tag:", tag)
        
            for tg in data["intents"]:
                if tg["tag"]==tag:
                   responses=tg['responses']
            
            client=discord.Client()             #FOR DISCORD--------------------------------------
            async def on_message(message):
                if inp.author == client.user:
                   return
               
                if inp.content.startswith("$M-bot"):
                    response=responses.request(inp.content[7:])
                    await asyncio.sleep(5) 
                    await inp.channel.send(response) 
                    
            
            on_message(inp)    
            client.run("API KEY TAKEN FROM DISCORD for BOT")
            print("Bot:",random.choice(responses))
         
        else:
          print("I didn't get that. Please try again")  
          
chat()  

警告和错误(Pyconsole):

start talking with the bot (type quit to stop!

You:hello
tag: greeting
C:/Users/Desktop/chatbot/chatbot.py:154: RuntimeWarning: coroutine 'chat.<locals>.on_message' was never awaited
  on_message(inp)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
Traceback (most recent call last):

  File "F:\Anaconda\lib\site-packages\discord\client.py", line 713, in run
    loop.run_forever()

  File "F:\Anaconda\lib\asyncio\base_events.py", line 560, in run_forever
    self._check_running()

  File "F:\Anaconda\lib\asyncio\base_events.py", line 552, in _check_running
    raise RuntimeError('This event loop is already running')

RuntimeError: This event loop is already running


During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  File "F:\Anaconda\lib\site-packages\discord\client.py", line 90, in _cleanup_loop
    _cancel_tasks(loop)

  File "F:\Anaconda\lib\site-packages\discord\client.py", line 75, in _cancel_tasks
    loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))

  File "F:\Anaconda\lib\asyncio\base_events.py", line 592, in run_until_complete
    self._check_running()

  File "F:\Anaconda\lib\asyncio\base_events.py", line 552, in _check_running
    raise RuntimeError('This event loop is already running')

RuntimeError: This event loop is already running


During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  File "C:/Users/Desktop/chatbot/chatbot.py", line 162, in <module>
    chat()

  File "C:/Users/Desktop/chatbot/chatbot.py", line 155, in chat
    client.run("API KEY TAKEN FROM DISCORD for BOT")

  File "F:\Anaconda\lib\site-packages\discord\client.py", line 719, in run
    _cleanup_loop(loop)

  File "F:\Anaconda\lib\site-packages\discord\client.py", line 95, in _cleanup_loop
    loop.close()

  File "F:\Anaconda\lib\asyncio\selector_events.py", line 89, in close
    raise RuntimeError("Cannot close a running event loop")

RuntimeError: Cannot close a running event loop

问题:你好朋友,我正在尝试制作一个可以处理不和谐的聊天机器人,并且可以通过我构建的人工智能模型给出答案,但是我收到了 RuntimeWarning: E​​nable tracemalloc to get the object allocation tracebackRuntimeError: This event loop is already running 我该如何解决这些问题?

【问题讨论】:

    标签: python-3.x async-await discord.py python-asyncio tflearn


    【解决方案1】:

    您的错误是因为您不断重新启动discord.Client。在每个程序中,应该只有一个discord.Client 的实例。如果你想让它吐出最后一个响应,你应该把客户端移出循环。设置机器人对全局变量的响应,并在发送命令时让机器人吐出全局变量

    【讨论】:

    • 我添加了一个不会报错的代码,但是我没有达到我想要的。只有当我通过 Discord 向机器人提问时,我才想得到答案,但它允许通过 Pyconsole 提问和回答。你能帮我修一下代码吗?
    【解决方案2】:

    安排:

    
        import nltk
        
        nltk.download('punkt')
        
        from nltk.stem.lancaster import LancasterStemmer
        stemmer=LancasterStemmer()
        import numpy
        import tflearn
        import tensorflow
        import random
        import json
        import pickle
        import nest_asyncio
        import asyncio
        #-------------------------------------------------------------------------------
        import discord 
        import os
        
        
        
        with open("intents.json") as file:
             data=json.load(file)
             print(data['intents'])
             
        client=discord.Client() #OUT OF LOOP
        
        @client.event                        #LISTEN EVENTS
        async def on_message(message):
                  
                  if message.author == client.user:
                     return
                 
                  if message.content.startswith("$M-bot"):
                      response=responses.request(message.content[7:])
        
                      await message.channel.send(response) 
                     
            
               
           
             
        try:
            with open("data.pickle","rb") as f:
                words,labels,training,output=pickle.load(f)
        except: 
            words=[]
            labels=[]
            docs_x=[]
            docs_y=[]
            
            for intent in data['intents']:
                for pattern in intent['patterns']:
                    wrds=nltk.word_tokenize(pattern)
                    words.extend(wrds)
                    docs_x.append(wrds)
                    docs_y.append(intent["tag"])
                     
                    
                    if intent["tag"] not in labels:
                       labels.append(intent["tag"])
                       
                       
            #remove duplicate          
            words=[stemmer.stem(w.lower()) for w in words if w != "?"]
            
            words=sorted(list(set(words)))
            
            labels=sorted(labels)
             
            
            training=[] 
            output=[]
            
            out_empty=[0 for _ in range(len(labels))]
            
            for x, doc in enumerate(docs_x):
                bag=[]
                wrds=[stemmer.stem(w) for w in doc]
                
                for w in words:
                  if w in wrds:
                     bag.append(1)
                  else:
                     bag.append(0)
                     
                output_row=out_empty[:]
                     
                output_row[labels.index(docs_y[x])]=1
                
                training.append(bag)     
                output.append(output_row)
                
            training=numpy.array(training)
            output=numpy.array(output)
            
            with open("data.pickle","wb") as f:
                pickle.dump((words,labels,training,output),f)
        
        tensorflow.compat.v1.reset_default_graph()
        
        net=tflearn.input_data(shape=[None,len(training[0])])
        net=tflearn.fully_connected(net,16) 
        net=tflearn.fully_connected(net,16)
        net=tflearn.fully_connected(net,len(output[0]),activation="softmax")
        net=tflearn.regression(net)
        
        model=tflearn.DNN(net)
        
        model.fit(training, output,n_epoch=5000,batch_size=16,show_metric=True )    
        
        model.save('C:/Users/Desktop/chatbot/model/model.tflearn')
        model.load('C:/Users/Desktop/chatbot/model/model.tflearn')
        
            
        
        def bag_of_words(s,words):
        
           bag=[0 for _ in range(len(words))]
           s_words=nltk.word_tokenize(s)
           s_words=[stemmer.stem(word.lower()) for word in s_words]
        
         
           for se in s_words:
               for i,w in enumerate(words):
                   if w==se:
                      bag[i]=1
        
           return numpy.array(bag)
        
        
        
        def chat():
            global responses          #GLOBAL VARIABLES
            global inp                #GLOBAL VARIABLES
            print("start talking with the bot (type quit to stop!")
            while True:
                inp=input("You:")
                if inp.lower()=="quit":
                   break
               
                results= model.predict([bag_of_words(inp,words)])[0]
                # print("results:",results)
               
                results_index=numpy.argmax(results)
                
                if results[results_index]>0.7:
                    
                        
                    tag=labels[results_index]
                    print("tag:", tag)
                
                    for tg in data["intents"]:
                        if tg["tag"]==tag:
                           responses=tg['responses']
                    
                 
                   
                    
                    print("Bot:",random.choice(responses))
                 
                else:
                  print("I didn't get that. Please try again")  
                  
        
        chat()  
          
        client.run("API KEY") 
    
    

    【讨论】:

      猜你喜欢
      • 1970-01-01
      • 1970-01-01
      • 2021-01-12
      • 1970-01-01
      • 2021-07-28
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      • 1970-01-01
      相关资源
      最近更新 更多