4 Commits

Author  SHA1        Message                                         Date
t3xhno  97d613df58  Added tasks integration                         2024-02-06 21:17:49 +01:00
t3xhno  1e56a84a4c  Added required positional argument placeholder  2024-02-06 21:05:02 +01:00
t3xhno  c011383f0e  fix wiki commands                               2024-02-06 20:51:31 +01:00
t3xhno  544f8052e9  fix wiki placing it into command functions      2024-02-06 20:48:08 +01:00
2 changed files with 22 additions and 3 deletions


@@ -4,9 +4,8 @@ import scraper_functions as sf
 def processmsg(msg, rcpt):
     if "youtube.com/watch" in msg:
         return msg.replace("youtube.com", "iv.datura.network")
-    elif msg.startswith("!wiki"):
-        cmd, query = msg.split(" ", 1)
-        return sf.query_external_website("https://en.wikipedia.org", "/wiki/" + query)
+    elif msg.startswith("!"):
+        return command(msg, "")
     elif "good bot" in msg:
         return "^_^"
@@ -21,4 +20,10 @@ def command(msg, rcpt):
         client = ollama.Client(host='https://ollama.krov.dmz.rs')
         response = client.chat(model='llama2-uncensored:latest', messages=[{'role':'user','content':f'{msg[4:]}'}])
         return(response['message']['content'])
+    elif msg.startswith("!wiki"):
+        cmd, query = msg.split(" ", 1)
+        return sf.query_external_website("https://en.wikipedia.org", "/wiki/" + query)
+    elif msg.startswith("!tasks"):
+        content = sf.getDmzTasks()
+        return content
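
Read together, the two hunks move the !wiki lookup out of processmsg and route every message starting with "!" through command(), passing "" as the placeholder for the required rcpt positional argument (commit 1e56a84a4c). A minimal sketch of the resulting dispatch, assuming the ollama call sits behind a !ai prefix check; the diff shows only that branch's body, so the guard is an assumption:

import ollama
import scraper_functions as sf

def processmsg(msg, rcpt):
    # Plain-text rewrites stay here; anything starting with "!" is a command.
    if "youtube.com/watch" in msg:
        return msg.replace("youtube.com", "iv.datura.network")
    elif msg.startswith("!"):
        return command(msg, "")  # "" stands in for the required rcpt argument
    elif "good bot" in msg:
        return "^_^"

def command(msg, rcpt):
    if msg.startswith("!ai"):  # assumed guard; msg[4:] implies "!ai " plus prompt
        client = ollama.Client(host='https://ollama.krov.dmz.rs')
        response = client.chat(model='llama2-uncensored:latest',
                               messages=[{'role': 'user', 'content': msg[4:]}])
        return response['message']['content']
    elif msg.startswith("!wiki"):
        # Raises ValueError when the message is "!wiki" with no query.
        cmd, query = msg.split(" ", 1)
        return sf.query_external_website("https://en.wikipedia.org", "/wiki/" + query)
    elif msg.startswith("!tasks"):
        return sf.getDmzTasks()

Dispatching on the bare "!" prefix means an unrecognized command falls through command() and returns None; the diff shows no default branch.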

scraper_functions.py

@@ -17,3 +17,17 @@ def query_external_website(base_url, query):
         return "\nTITLE:\n" + title + "\n\nCONTENT:\n" + content.text + "\n\nFULL LINK:\n" + base_url + quote(query)
     except Exception as e:
         return e
+
+def getDmzTasks():
+    try:
+        page = requests.get("https://todo.dmz.rs/")
+        soup = BeautifulSoup(page.content, "html.parser")
+        tasks = soup.find_all(class_="task")
+        result = "\nActive tasks:\n"
+        for task in tasks:
+            taskIndex = task.select("div")[0].text
+            taskTitle = task.select("div")[1].text
+            result += taskIndex + " " + taskTitle + "\n"
+        return result
+    except Exception as e:
+        return e
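
getDmzTasks assumes every entry on https://todo.dmz.rs/ is rendered as an element with class "task" whose first two <div> children hold the task index and title. A slightly hardened sketch of the same scraper; the timeout, raise_for_status, the length guard, and the str(e) conversion are additions for illustration, not part of the commit:

import requests
from bs4 import BeautifulSoup

def getDmzTasks():
    # Scrape the active-task list from the DMZ todo tracker.
    try:
        page = requests.get("https://todo.dmz.rs/", timeout=10)
        page.raise_for_status()  # surface HTTP errors instead of parsing an error page
        soup = BeautifulSoup(page.content, "html.parser")
        result = "\nActive tasks:\n"
        for task in soup.find_all(class_="task"):
            divs = task.select("div")
            if len(divs) >= 2:  # guard against markup changes
                result += divs[0].text + " " + divs[1].text + "\n"
        return result
    except Exception as e:
        return str(e)

Returning str(e) rather than the exception object keeps the return type uniform, since callers treat the return value as the reply text.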