idk how an AI could do better front end experience than me but aight.

ForeverPyrite
2025-01-07 16:00:16 -05:00
parent 613cfabb5a
commit 8aae2cea1c
4 changed files with 268 additions and 201 deletions

View File

@@ -1,102 +1,134 @@
-import os
+# To parse video ids
 import re
-import threading
-import pytz
-from datetime import datetime
-from dotenv import load_dotenv
-from youtube_transcript_api import YouTubeTranscriptApi, _errors
+# Youtube Transcript stuff import
+import youtube_transcript_api._errors
+from youtube_transcript_api import YouTubeTranscriptApi
 from youtube_transcript_api.formatters import TextFormatter
-from openai import AssistantEventHandler, OpenAI
+# OpenAI API stuff import
+from openai import AssistantEventHandler
+from openai import OpenAI
+### For streaming
 from typing_extensions import override
 import asyncio
+awaiter = asyncio.run
+# The StreamOutput class to handle streaming
+class StreamOutput:
+    def __init__(self):
+        self.delta: str = ""
+        self.response: str = ""
+        self.done: bool = False
+        self.buffer: list = []
+    def reset(self):
+        self.delta = ""
+        self.response = ""
+        self.done = False
+        self.buffer: list = []
+    async def send_delta(self, delta):
+        self.delta = delta
+        self.response += delta
+        def get_index(list):
+            if len(list) == 0:
+                return 0
+            else:
+                return len(list)-1
+        if self.buffer != []:
+            try:
+                if self.delta != self.buffer[get_index(self.buffer)]:
+                    self.buffer.append(delta)
+            except IndexError as index_error:
+                log(f"\nCaught IndexError: {str(index_error)}")
+                self.buffer.append(delta)
+        else: self.buffer.append(delta)
+# To get the env var
+from dotenv import load_dotenv
+import os
+# Load environment variables
 load_dotenv()
 # For logging
-def log(message: str):
-    timestamp = datetime.now(pytz.timezone('America/New_York')).strftime('%Y-%m-%d %H:%M:%S')
-    with open("logs/log.md", "a") as file:
-        file.write(f"{timestamp} - {message}\n")
-# StreamOutput class to handle streaming
-class StreamOutput:
-    def __init__(self):
-        self.response = ""
-        self.done = False
-        self.buffer = []
-        self.lock = threading.Lock()
-    def reset(self):
-        with self.lock:
-            self.response = ""
-            self.done = False
-            self.buffer = []
-    def add_to_buffer(self, delta: str):
-        with self.lock:
-            self.response += delta
-            self.buffer.append(delta)
+import pytz
+from datetime import datetime
+def log(message):
+    try:
+        with open("logs/log.md", "a") as file:
+            file.write(message)
+    except FileNotFoundError:
+        with open("logs/log.md", "x+"):
+            log(message)
+### OpenAI Config
+# Setting up OpenAI Client with API Key
+client = OpenAI(
+    organization='org-7ANUFsqOVIXLLNju8Rvmxu3h',
+    project="proj_NGz8Kux8CSka7DRJucAlDCz6",
+    api_key=os.getenv("OPENAI_API_KEY")
+)
+# screw bardo assistant that is configured to make notes and 5Q&A based on any given YouTube Transcript
+asst_screw_bardo_id = "asst_JGFaX6uOIotqy5mIJnu3Yyp7"
+# This is copy and pasted straight up from the quickstart guide, just appending to an output buffer instead of directly printing:
+class EventHandler(AssistantEventHandler):
+    @override
+    def on_text_created(self, text) -> None:
+        awaiter(output_stream.send_delta("Response Recieved:\n\nScrew-Bardo:\n\n"))
+    @override
+    def on_text_delta(self, delta, snapshot):
+        awaiter(output_stream.send_delta(delta.value))
+    def on_tool_call_created(self, tool_call):
+        raise Exception("Assistant shouldn't be calling tools.")
+def create_and_stream(transcript):
+    with client.beta.threads.create_and_run_stream(
+        assistant_id=asst_screw_bardo_id,
+        thread={
+            "messages": [{"role": "user", "content": transcript}]
+        },
+        event_handler=EventHandler()
+    ) as stream:
+        stream.until_done()
+    output_stream.done = True
+def get_video_id(url):
+    youtu_be = r'(?<=youtu.be/)([A-Za-z0-9_-]{11})'
+    youtube_com = r'(?<=youtube\.com\/watch\?v=)([A-Za-z0-9_-]{11})'
+    id = re.search(youtu_be, url)
+    if not id:
+        id = re.search(youtube_com, url)
+    if not id:
+        # Couldn't parse video ID from URL
+        return None
+    return id.group(1)
+# Takes the transcript and formats it in basic text before writing it to auto-transcript.txt
+def get_auto_transcript(video_id):
+    trans_api_errors = youtube_transcript_api._errors
+    try:
+        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['en'], proxies=None, cookies=None, preserve_formatting=False)
+    except trans_api_errors.TranscriptsDisabled as e:
+        log(f'\n\n# Exception while fetching transcript:\n \n{e}\n')
+        return None
+    formatter = TextFormatter() # Ensure that you create an instance of TextFormatter
+    txt_transcript = formatter.format_transcript(transcript)
+    return txt_transcript
 output_stream = StreamOutput()
-# OpenAI Client Configuration
-client = OpenAI(
-    organization='org-7ANUFsqOVIXLLNju8Rvmxu3h',
-    project="proj_NGz8Kux8CSka7DRJucAlDCz6",
-    api_key=os.getenv("OPENAI_API_KEY")
-)
-asst_screw_bardo_id = "asst_JGFaX6uOIotqy5mIJnu3Yyp7"
-# Async helper
-def awaiter(coro):
-    asyncio.run(coro)
-# EventHandler for OpenAI Assistant
-class EventHandler(AssistantEventHandler):
-    @override
-    def on_text_created(self, text) -> None:
-        awaiter(output_stream.send_delta("Response Received:\n\nScrew-Bardo:\n\n"))
-    @override
-    def on_text_delta(self, delta, snapshot):
-        awaiter(output_stream.send_delta(delta.value))
-    def on_tool_call_created(self, tool_call):
-        raise Exception("Assistant shouldn't be calling tools.")
-def create_and_stream(transcript: str):
-    try:
-        with client.beta.threads.create_and_run_stream(
-            assistant_id=asst_screw_bardo_id,
-            thread={
-                "messages": [{"role": "user", "content": transcript}]
-            },
-            event_handler=EventHandler()
-        ) as stream:
-            stream.until_done()
-        output_stream.done = True
-    except Exception as e:
-        log(f"Error in create_and_stream: {e}")
-        output_stream.done = True
-def get_video_id(url: str) -> str:
-    youtu_be = r'(?<=youtu.be/)([A-Za-z0-9_-]{11})'
-    youtube_com = r'(?<=youtube\.com\/watch\?v=)([A-Za-z0-9_-]{11})'
-    match = re.search(youtu_be, url) or re.search(youtube_com, url)
-    if match:
-        return match.group(1)
-    return None
-def get_auto_transcript(video_id: str) -> str:
-    try:
-        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['en'])
-        formatter = TextFormatter()
-        return formatter.format_transcript(transcript)
-    except _errors.TranscriptsDisabled as e:
-        log(f'Exception while fetching transcript: {e}')
-    except Exception as e:
-        log(f'Unexpected error while fetching transcript: {e}')
-    return None
+log(f"\n\n# Main initilized at {datetime.now(pytz.timezone('America/New_York')).strftime('%Y-%m-%d %H:%M:%S')}. Presumeably application starting.\n")
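
Both sides of this diff expose the same module surface: get_video_id() pulls the 11-character ID out of a YouTube URL, get_auto_transcript() fetches and formats the transcript, and create_and_stream() runs the assistant while appending text to the module-level output_stream. A minimal usage sketch, assuming the module is importable; the module name and the example URL below are illustrative, not taken from the commit:

import threading
import screw_bardo  # hypothetical name for the module changed above

video_id = screw_bardo.get_video_id("https://youtu.be/dQw4w9WgXcQ")  # example URL
if video_id:
    transcript = screw_bardo.get_auto_transcript(video_id)
    if transcript:
        # create_and_stream blocks until the assistant run finishes, so run it in a
        # background thread and read partial text from screw_bardo.output_stream.response meanwhile.
        worker = threading.Thread(target=screw_bardo.create_and_stream, args=(transcript,), daemon=True)
        worker.start()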

View File

@@ -2,29 +2,27 @@
 <html lang="en-us">
 <head>
     <meta charset="UTF-8">
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <title>Screw You Bardo</title>
     <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
     <link rel="icon" type="image/x-icon" href="https://www.foreverpyrite.com/favicon.ico">
-    <script src="{{ url_for('static', filename='script.js')}}"></script>
+    <script defer src="{{ url_for('static', filename='script.js') }}"></script>
 </head>
 <body>
-    <div class="content">
-        <pre id="response-area">Response will appear here. </pre>
-        <div class="form_box">
-            <input id="url_box" placeholder="Paste the lecture URL here." autofocus></input>
-            <input id="submit" type="submit" onclick=""></input>
-        </div>
-    </div>
+    <main class="container">
+        <section class="response-section">
+            <pre id="response-area">Response will appear here.</pre>
+        </section>
+        <section class="form-section">
+            <form id="url-form">
+                <input type="url" id="url_box" name="url" placeholder="Paste the lecture URL here." required autofocus>
+                <button type="submit" id="submit">Submit</button>
+            </form>
+        </section>
+    </main>
 </body>
 </html>
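
The template resolves its assets through url_for('static', ...), so it is meant to be rendered by Flask. The Flask app itself is not one of the four changed files, but a minimal sketch of the route that would serve this page could look like the following; the app and template filenames are assumptions:

from flask import Flask, render_template

app = Flask(__name__)

@app.route("/")
def index():
    # url_for('static', filename='style.css') and filename='script.js' in the template
    # resolve against Flask's default static/ folder, so both files are expected there.
    return render_template("index.html")  # template filename assumed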

View File

@@ -1,18 +1,24 @@
-document.addEventListener("DOMContentLoaded", (event) => {
-    const response_area = document.getElementById('response-area');
-    const submit_button = document.getElementById('submit')
-    submit_button.addEventListener('click', function() {
-        var url = document.getElementById('url_box').value;
+document.addEventListener("DOMContentLoaded", () => {
+    const responseArea = document.getElementById('response-area');
+    const submitButton = document.getElementById('submit');
+    const urlForm = document.getElementById('url-form');
+    const urlBox = document.getElementById('url_box');
+    urlForm.addEventListener('submit', function(event) {
+        event.preventDefault(); // Prevent form from submitting the traditional way
+        const url = urlBox.value.trim();
         if (!url) {
-            response_area.innerText = 'Please enter a URL.';
+            responseArea.innerText = 'Please enter a URL.';
             return;
         }
-        else {
-            document.getElementById('url_box').value = "";
-        }
-        // First, process the URL
+        // Clear the input and update UI
+        urlBox.value = "";
+        submitButton.disabled = true;
+        responseArea.innerText = 'Processing...';
+        // Process the URL
         fetch('/process_url', {
             method: 'POST',
             headers: {
@@ -24,57 +30,58 @@ document.addEventListener("DOMContentLoaded", (event) => {
             if (!response.ok) {
                 throw new Error('Network response was not ok');
             }
-            // Extract the text from the response body
-            return response.text(); // Use .json() if the response is JSON
+            return response.text();
         })
         .then(text => {
-            submit_button.style.display = "none";
             if (text === "Processing started. Check /stream_output for updates.") {
-                streamOutput(response_area);
+                streamOutput(responseArea);
             } else {
-                response_area.innerText = text; // Show any other response message
-                submit_button.style.display = "flex";
+                responseArea.innerText = text;
+                submitButton.disabled = false;
             }
         })
         .catch(error => {
             console.error('Error processing URL:', error);
-            response_area.innerText = 'Error processing URL: ' + error.message;
-            submit_button.style.display = "flex";
+            responseArea.innerText = 'Error processing URL: ' + error.message;
+            submitButton.disabled = false;
         });
     });
+    function streamOutput(responseArea) {
+        // Fetch the streaming output
+        fetch('/stream_output')
+            .then(response => {
+                if (!response.ok) {
+                    throw new Error('Network response was not ok');
+                }
+                const reader = response.body.getReader();
+                const decoder = new TextDecoder("utf-8");
+                responseArea.innerHTML = "";
+                function readStream() {
+                    reader.read().then(({ done, value }) => {
+                        if (done) {
+                            submitButton.disabled = false;
+                            return;
+                        }
+                        const chunk = decoder.decode(value, { stream: true });
+                        responseArea.innerHTML += chunk;
+                        responseArea.scrollTop = responseArea.scrollHeight;
+                        readStream();
+                    }).catch(error => {
+                        console.error('Error reading stream:', error);
+                        responseArea.innerText = 'Error reading stream: ' + error.message;
+                        submitButton.disabled = false;
+                    });
+                }
+                readStream();
+            })
+            .catch(error => {
+                console.error('Error fetching stream:', error);
+                responseArea.innerText = 'Error fetching stream: ' + error.message;
+                submitButton.disabled = false;
+            });
+    }
 });
-function streamOutput(response_area) {
-    // Fetch the streaming output
-    const streamResponsePromise = fetch('/stream_output');
-    response_area.innerHTML = ""
-    streamResponsePromise
-        .then(response => {
-            const reader = response.body.getReader();
-            const decoder = new TextDecoder("utf-8");
-            function readStream() {
-                reader.read().then(({ done, value }) => {
-                    if(done) {
-                        document.getElementById('submit').style.display = "flex";
-                        return
-                    }
-                    // Decode and process the chunk
-                    const chunk = decoder.decode(value, { stream: true });
-                    response_area.innerHTML += chunk;
-                    response_area.scrollTop = response_area.scrollHeight
-                    // Continue reading
-                    readStream();
-                });
-            }
-            // Start reading the stream
-            readStream();
-        })
-        .catch(error => {
-            console.error('Error fetching stream:', error);
-            response_area.innerText = 'Error fetching stream: ' + error.message;
-        });
-}
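
The rewritten script POSTs the URL to /process_url and then reads /stream_output incrementally with response.body.getReader(), which only streams if the server returns that route as an unbuffered, chunked response. The Flask app is not part of this commit; a sketch of what /stream_output could look like, assuming it polls the output_stream object from the Python module above (module name hypothetical):

import time
from flask import Flask, Response
from screw_bardo import output_stream  # hypothetical module name, see the first changed file

app = Flask(__name__)

@app.route("/stream_output")
def stream_output():
    def generate():
        sent = 0
        # Yield whatever new text the assistant has appended to output_stream.response
        # until the background worker marks the stream as done.
        while True:
            chunk = output_stream.response[sent:]
            if chunk:
                sent += len(chunk)
                yield chunk
            elif output_stream.done:
                break
            else:
                time.sleep(0.1)
    return Response(generate(), mimetype="text/plain")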

View File

@@ -1,7 +1,5 @@
 @font-face {
-    font-family: 'nimbus_sans_d_otlight';
+    font-family: 'NimbusSansD';
     src: url('font-files/nimbus-sans-d-ot-light.woff2') format('woff2'),
          url('font-files/nimbus-sans-d-ot-light.woff') format('woff');
     font-weight: normal;
@@ -9,70 +7,102 @@
 }
 * {
-    font-family: 'nimbus_sans_d_otlight';
-    color: white;
+    box-sizing: border-box;
+    margin: 0;
+    padding: 0;
+    font-family: 'NimbusSansD', sans-serif;
+    color: #FFFFFF;
 }
 body {
     display: flex;
-    flex-direction: column;
-    width: 100%;
-    max-width: 100vw;
-    height: 100%;
-    min-height: 100vh;
-    max-height: 100vh;
-    margin: 0;
-    background-color: rgb(31, 31, 31);
+    justify-content: center;
+    align-items: center;
+    height: 100vh;
+    background-color: #1F1F1F;
 }
-body .content {
+.container {
     display: flex;
     flex-direction: column;
-    align-self: center;
-    width: 75%;
-    max-width: 65vw;
-    height: 100%;
-    min-height: 100vh;
-    max-height: 100vh;
+    width: 85vw;
+    height: 90vh;
+    background-color: #2E2E2E;
+    border-radius: 10px;
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
+    overflow: hidden;
+}
+.response-section {
+    flex: 1;
+    padding: 20px;
+    background-color: #1E1E1E;
+    overflow-y: auto;
+    font-size: 1rem;
+    line-height: 1.5;
+}
+.form-section {
+    padding: 15px 20px;
+    background-color: #3A3A3A;
 }
 #response-area {
-    display: block;
-    height: 90%;
-    min-height: 90vh;
-    text-wrap: wrap;
-    flex-wrap: wrap;
-    align-content: flex-end;
-    overflow-y: auto;
+    white-space: pre-wrap;
 }
-.form_box {
+#url-form {
     display: flex;
-    width: 100%;
-    justify-content: space-between;
-    align-content: space-around;
+    gap: 10px;
 }
 #url_box {
-    display: flex;
-    height: 5%;
-    min-height: 5vh;
-    width: 90%;
-    min-width: 80vh;
-    background-color: rgb(31, 31, 31);
+    flex: 1;
+    padding: 10px 15px;
+    border: none;
+    border-radius: 5px;
+    background-color: #4A4A4A;
+    color: #FFFFFF;
+    font-size: 1rem;
+    outline: none;
+}
+#url_box::placeholder {
+    color: #B0B0B0;
 }
 #submit {
-    display: flex;
-    width: 5%;
-    min-width: 3vw;
-    background-color: rgb(49, 49, 49);
-}
-#submit:hover {
+    padding: 10px 20px;
+    border: none;
+    border-radius: 5px;
+    background-color: #5A5A5A;
+    color: #FFFFFF;
+    font-size: 1rem;
     cursor: pointer;
-    background-color: rgb(31, 31, 31);
+    transition: background-color 0.3s ease;
 }
-input {
-    border-radius: 15px;
+#submit:hover {
+    background-color: #7A7A7A;
+}
+#submit:disabled {
+    background-color: #3A3A3A;
+    cursor: not-allowed;
+}
+/* Responsive Adjustments */
+@media (max-width: 600px) {
+    .container {
+        height: 95vh;
+    }
+    #url_box {
+        font-size: 0.9rem;
+    }
+    #submit {
+        font-size: 0.9rem;
+        padding: 10px;
+    }
 }