
2 changed files with 17 additions and 8 deletions


@@ -5,7 +5,8 @@ def processmsg(msg, rcpt):
     if "youtube.com/watch" in msg:
         return msg.replace("youtube.com", "iv.datura.network")
     elif msg.startswith("!wiki"):
-        return sf.query_external_website("https://en.wikipedia.org/wiki/", msg.split(" ")[1])
+        cmd, query = msg.split(" ", 1)
+        return sf.query_external_website("https://en.wikipedia.org", "/wiki/" + query)
     elif "good bot" in msg:
         return "^_^"
@@ -13,7 +14,8 @@ def command(msg, rcpt):
     if msg.startswith("!help"):
         response = "chatbot commands:" + "\n"
         response += "!help Show this help page" + "\n"
-        response += "!ai [message] Ask llama2"
+        response += "!ai [message] Ask llama2" + "\n"
+        response += "!wiki [message] Ask wiki"
         return response
     elif msg.startswith("!ai"):
         client = ollama.Client(host='https://ollama.krov.dmz.rs')


@@ -1,12 +1,19 @@
 import requests
 from bs4 import BeautifulSoup
+from urllib.parse import quote
 
 def query_external_website(base_url, query):
     try:
-        page = requests.get(base_url + query)
+        page = requests.get(base_url + quote(query))
         soup = BeautifulSoup(page.content, "html.parser")
-        title = soup.find("span", class_="mw-page-title-main").text
-        content = soup.find(id="mw-content-text").select("p")[2].text
-        return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content + "\n\nFULL LINK:\n" + base_url + query
-    except:
-        return "Can't parse search result :("
+        title = soup.find(id="firstHeading").text
+        mainContentElement = soup.find(id="mw-content-text")
+        if "This page is a redirect" in mainContentElement.text:
+            redirectLink = mainContentElement.find(class_="redirectMsg").find_all("a")[0]["href"]
+            return query_external_website(base_url, redirectLink)
+        content = next((paragraph for paragraph in mainContentElement.select("p") if not paragraph.has_attr("class")), None)
+        if content == None:
+            raise Exception("Can't parse search result :(")
+        return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content.text + "\n\nFULL LINK:\n" + base_url + quote(query)
+    except Exception as e:
+        return e
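Summarizing the scraper changes: the query is now percent-encoded with urllib.parse.quote, the title comes from the firstHeading element instead of the mw-page-title-main span, the summary is the first <p> without a class attribute rather than a hard-coded paragraph index, and pages that render as redirect stubs are followed by recursing on the link inside the redirectMsg element. A rough standalone sketch of that parsing strategy, not the repo's module itself; the fetch_summary name is hypothetical and it assumes a MediaWiki-style page layout:

import requests
from bs4 import BeautifulSoup
from urllib.parse import quote

def fetch_summary(base_url, query):
    # Percent-encode the query so spaces and punctuation survive the request.
    page = requests.get(base_url + quote(query))
    soup = BeautifulSoup(page.content, "html.parser")

    # "firstHeading" is present on article pages even when the
    # mw-page-title-main span is not.
    title = soup.find(id="firstHeading").text

    body = soup.find(id="mw-content-text")

    # Infobox and coordinate paragraphs carry a class attribute; the first
    # class-less <p> is normally the article's lead paragraph.
    lead = next((p for p in body.select("p") if not p.has_attr("class")), None)
    return title, (lead.text if lead else None)

# Example call, mirroring how the bot composes its arguments:
# fetch_summary("https://en.wikipedia.org", "/wiki/" + "Grace Hopper")

Recursing on the redirectMsg link means a query that lands on a redirect stub resolves to the target article instead of returning the one-line stub text.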