2025-10-08 15:45:47 -04:00
parent 190eb8acd2
commit 5d881369e9
4 changed files with 1923 additions and 99 deletions


@@ -7,9 +7,137 @@ import regex
from util.sing_util import Utility
from discord.ext import bridge, commands
from disc_havoc import Havoc
import random
import string
import json
import os
BOT_CHANIDS = []
# Global storage for pagination data (survives across interactions)
PAGINATION_DATA = {}
PAGINATION_FILE = "pagination_data.json"
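# On-disk layout: {custom_id_base: {"pages": [...], "song": str, "artist": str, "total_pages": int}}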
def load_pagination_data():
"""Load pagination data from JSON file"""
global PAGINATION_DATA
try:
if os.path.exists(PAGINATION_FILE):
with open(PAGINATION_FILE, 'r') as f:
PAGINATION_DATA = json.load(f)
except Exception:
PAGINATION_DATA = {}
def save_pagination_data():
"""Save pagination data to JSON file (excluding non-serializable embeds)"""
try:
# Create serializable copy without embeds
serializable_data = {}
for key, value in PAGINATION_DATA.items():
serializable_data[key] = {
"pages": value["pages"],
"song": value["song"],
"artist": value["artist"],
"total_pages": value["total_pages"]
}
with open(PAGINATION_FILE, 'w') as f:
json.dump(serializable_data, f)
except Exception:
pass
import logging
from typing import Optional, Union
from regex import Pattern
import discord
import regex
from util.sing_util import Utility
from discord.ext import bridge, commands
from disc_havoc import Havoc
import random
import string
BOT_CHANIDS = []
# Global storage for pagination data (survives across interactions)
PAGINATION_DATA: dict[str, dict] = {}
class LyricsPaginator(discord.ui.View):
def __init__(self, pages, song, artist):
super().__init__(timeout=None) # No timeout - buttons work permanently
self.pages = pages
self.song = song
self.artist = artist
self.current_page = 0
# Generate a unique ID for this paginator instance
self.custom_id_base = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
# Pre-generate all embeds to avoid creating them on each page turn
self.embeds = []
# URL encode artist and song for the link
import urllib.parse
encoded_artist = urllib.parse.quote(self.artist, safe='')
encoded_song = urllib.parse.quote(self.song, safe='')
lyrics_url = f"https://codey.lol/#{encoded_artist}/{encoded_song}"
for i, page in enumerate(pages):
embed = discord.Embed(title=f"{self.song}", color=discord.Color.blue(), url=lyrics_url)
embed.set_author(name=self.artist)
embed.description = page
# Set footer with just page info for multi-page, or no footer for single page
if len(pages) > 1:
embed.set_footer(text=f"Page {i + 1} of {len(pages)}", icon_url="https://codey.lol/favicon.ico")
self.embeds.append(embed)
# Store pagination data for persistence across bot restarts
if len(pages) > 1:
# Store data (embeds will be recreated as needed since they can't be JSON serialized)
serializable_data = {
"pages": pages,
"song": song,
"artist": artist,
"total_pages": len(pages)
}
# Store in memory with embeds for performance
PAGINATION_DATA[self.custom_id_base] = {
**serializable_data,
"embeds": self.embeds
}
# Save serializable data to file
save_pagination_data()
def get_view_for_page(self, page_num: int):
"""Create a view with properly configured buttons for the given page"""
view = discord.ui.View(timeout=None)
if len(self.pages) > 1:
# Previous button
prev_button = discord.ui.Button(
label="Previous",
style=discord.ButtonStyle.secondary,
emoji="⬅️",
disabled=(page_num == 0),
custom_id=f"lyrics_prev:{self.custom_id_base}",
row=0
)
# Next button
next_button = discord.ui.Button(
label="Next",
style=discord.ButtonStyle.primary,
emoji="➡️",
disabled=(page_num == len(self.pages) - 1),
custom_id=f"lyrics_next:{self.custom_id_base}",
row=0
)
view.add_item(prev_button)
view.add_item(next_button)
return view
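# Typical wiring (mirrors the command handlers below): build the paginator,
# take the view for page 0, then edit the placeholder message:
#     paginator = LyricsPaginator(pages, search_result_song, search_result_artist)
#     view = paginator.get_view_for_page(0)
#     await message.edit(content=None, embed=paginator.embeds[0], view=view)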
class Sing(commands.Cog):
"""Sing Cog for Havoc"""
@@ -24,6 +152,84 @@ class Sing(commands.Cog):
regex.UNICODE,
)
# Persistent interactions handled by on_interaction listener
@commands.Cog.listener()
async def on_interaction(self, interaction: discord.Interaction):
"""Handle persistent lyrics pagination interactions"""
if interaction.type != discord.InteractionType.component:
return
custom_id = interaction.data.get("custom_id", "")
if not (custom_id.startswith("lyrics_prev:") or custom_id.startswith("lyrics_next:")):
return
try:
# Extract the base custom ID and direction
if custom_id.startswith("lyrics_prev:"):
direction = -1
base_id = custom_id.replace("lyrics_prev:", "")
else: # lyrics_next:
direction = 1
base_id = custom_id.replace("lyrics_next:", "")
# Get pagination data from JSON persistence
data = PAGINATION_DATA.get(base_id)
if not data:
await interaction.response.send_message(
"⚠️ **Navigation Expired**\n"
"These buttons are from a previous session. "
"Use `/s <song>` or `/sing <artist> : <song>` to get fresh lyrics!",
ephemeral=True,
)
return
# Recreate embeds if they don't exist (loaded from JSON)
if "embeds" not in data or not data["embeds"]:
paginator = LyricsPaginator(data["pages"], data["song"], data["artist"])
paginator.custom_id_base = base_id
data["embeds"] = paginator.embeds
# Update in-memory data
PAGINATION_DATA[base_id] = data
# Get current page from embed footer
if not interaction.message or not interaction.message.embeds:
await interaction.response.defer()
return
current_embed = interaction.message.embeds[0]
footer_text = current_embed.footer.text if current_embed.footer else ""
# Extract current page number (format: "Page X of Y")
current_page = 0
if "Page " in footer_text and " of " in footer_text:
try:
page_part = footer_text.split("Page ")[1].split(" of ")[0]
current_page = int(page_part) - 1
                except (ValueError, IndexError):
pass
# Calculate new page
new_page = current_page + direction
total_pages = len(data["pages"])
if new_page < 0 or new_page >= total_pages:
await interaction.response.defer() # Just acknowledge, don't change anything
return
# Create new paginator and get view for new page
paginator = LyricsPaginator(data["pages"], data["song"], data["artist"])
paginator.custom_id_base = base_id # Preserve same custom ID
view = paginator.get_view_for_page(new_page)
# Update the message
await interaction.response.edit_message(embed=data["embeds"][new_page], view=view)
except Exception as e:
try:
await interaction.response.send_message(f"Error handling pagination: {str(e)}", ephemeral=True)
            except Exception:
pass
def is_spamchan(): # type: ignore
"""Check if channel is spam chan"""
@@ -60,18 +266,18 @@ class Sing(commands.Cog):
"**Error**: No song specified, no activity found to read."
)
await ctx.respond(
"*Searching...*"
) # Must respond to interactions within 3 seconds, per Discord
# Initial response that we'll edit later
message = await ctx.respond("*Searching for lyrics...*")
parsed = self.utility.parse_song_input(song, activity)
if isinstance(parsed, tuple):
(search_artist, search_song, search_subsearch) = parsed
# Update the message to show what we're searching for
await message.edit(content=f"*Searching for '{search_song}' by {search_artist}...*")
# await ctx.respond(f"So, {search_song} by {search_artist}? Subsearch: {search_subsearch} I will try...") # Commented, useful for debugging
search_result: Optional[list] = await self.utility.lyric_search(
search_artist, search_song, search_subsearch
search_artist, search_song, search_subsearch, is_spam_channel=(ctx.channel.id in BOT_CHANIDS)
)
if not search_result:
@@ -97,29 +303,26 @@ class Sing(commands.Cog):
search_result_wrapped_short: list[str] = search_result[
2
] # Third is short wrapped lyrics
if ctx.channel.id not in BOT_CHANIDS:
short_lyrics = " ".join(
search_result_wrapped_short
) # Replace with shortened lyrics for non spamchans
short_lyrics = regex.sub(
r"\p{Vert_Space}", " / ", short_lyrics.strip()
)
return await ctx.respond(
f"**{search_result_song}** by **{search_result_artist}**\n-# {short_lyrics}"
)
# Now all channels get full pagination - removed the early return restriction
out_messages: list = []
footer: str = "" # Placeholder
for c, section in enumerate(search_result_wrapped):
if c == len(search_result_wrapped):
footer = f"`Found on: {search_result_src}`"
section = regex.sub(r"\p{Vert_Space}", " / ", section.strip())
msg: str = f"**{search_result_song}** by **{search_result_artist}**\n-# {section}\n{footer}"
if c > 1:
msg = "\n".join(msg.split("\n")[1:])
out_messages.append(msg.strip())
for msg in out_messages:
await ctx.send(msg)
# Use the appropriate wrapped lyrics based on channel type
is_spam_channel = ctx.channel.id in BOT_CHANIDS
if is_spam_channel:
# Spam channels get full-length lyrics
pages = search_result_wrapped.copy()
else:
# Non-spam channels get shorter lyrics for better UX
pages = search_result_wrapped.copy() # For now, still use full but we'll modify limits later
# Add source info to last page
if pages:
pages[-1] += f"\n\n`Found on: {search_result_src}`"
# Create and send view with paginator
paginator = LyricsPaginator(pages, search_result_song, search_result_artist)
view = paginator.get_view_for_page(0)
# Edit the existing message to show the lyrics
await message.edit(content=None, embed=paginator.embeds[0], view=view)
except Exception as e:
traceback.print_exc()
await ctx.respond(f"ERR: {str(e)}")
@@ -149,26 +352,29 @@ class Sing(commands.Cog):
)
member_id: int = member.id # if not(member.id == PODY_ID) else 1234134345497837679 # Use Thomas for Pody!
activity: Optional[discord.Activity] = None
if IS_SPAMCHAN:
await ctx.respond(f"***Reading activity of {member_display}...***")
# Initial response that we'll edit later
message = await ctx.respond(f"*Reading activity of {member_display}...*" if IS_SPAMCHAN else "*Processing...*")
for _activity in ctx.interaction.guild.get_member(member_id).activities:
if _activity.type == discord.ActivityType.listening:
activity = _activity
parsed: Union[tuple, bool] = self.utility.parse_song_input(
song=None, activity=activity
)
if not parsed:
if IS_SPAMCHAN:
return await message.edit(content=f"Could not parse activity of {member_display}.")
else:
return await ctx.respond(
f"Could not parse activity of {member_display}.", ephemeral=True
)
if isinstance(parsed, tuple):
(search_artist, search_song, search_subsearch) = parsed
await ctx.respond(
"*Searching...*"
) # Must respond to interactions within 3 seconds, per Discord
await message.edit(content=f"*Searching for '{search_song}' by {search_artist}...*")
search_result: Optional[list] = await self.utility.lyric_search(
search_artist, search_song, search_subsearch
search_artist, search_song, search_subsearch, is_spam_channel=(ctx.channel.id in BOT_CHANIDS)
)
if not search_result:
await ctx.respond("ERR: No search result")
@@ -193,33 +399,24 @@ class Sing(commands.Cog):
2
] # Third index is shortened lyrics
if not IS_SPAMCHAN:
short_lyrics = " ".join(
search_result_wrapped_short
) # Replace with shortened lyrics for non spamchans
short_lyrics = regex.sub(
r"\p{Vert_Space}", " / ", short_lyrics.strip()
)
return await ctx.respond(
f"**{search_result_song}** by **{search_result_artist}**\n-# {short_lyrics}"
)
# All channels now get full paginated response
out_messages: list = []
footer: str = ""
c: int = 0
for section in search_result_wrapped:
c += 1
if c == len(search_result_wrapped):
footer = f"`Found on: {search_result_src}`"
# if ctx.guild.id == 1145182936002482196:
# section = section.upper()
section = regex.sub(r"\p{Vert_Space}", " / ", section.strip())
msg: str = f"**{search_result_song}** by **{search_result_artist}**\n-# {section}\n{footer}"
if c > 1:
msg = "\n".join(msg.split("\n")[1:])
out_messages.append(msg.strip())
for msg in out_messages:
await ctx.send(msg)
# Create paginator for lyrics
if not search_result_wrapped:
return await ctx.respond("No lyrics found.")
# Use the already smartly-wrapped pages from sing_util
pages = search_result_wrapped.copy() # Already processed by _smart_lyrics_wrap
# Add source info to last page
if pages:
pages[-1] += f"\n\n`Found on: {search_result_src}`"
# Create and send view with paginator
paginator = LyricsPaginator(pages, search_result_song, search_result_artist)
view = paginator.get_view_for_page(0)
# Edit the existing message to show the lyrics
await message.edit(content=None, embed=paginator.embeds[0], view=view)
except Exception as e:
traceback.print_exc()
return await ctx.respond(f"ERR: {str(e)}")
@@ -227,4 +424,5 @@ class Sing(commands.Cog):
def setup(bot) -> None:
"""Run on Cog Load"""
load_pagination_data() # Load existing pagination data from file
bot.add_cog(Sing(bot))


@@ -25,7 +25,7 @@ cogs_list: list[str] = [
"sing",
"meme",
"lovehate",
"ollama",
# "ollama",
# "radio",
]


@@ -1,10 +1,12 @@
import logging
import regex
import aiohttp
import textwrap
import traceback
import aiohttp
import regex
import discord
from discord import Activity
from typing import Optional, Union
logger = logging.getLogger(__name__)
class Utility:
@@ -14,9 +16,134 @@ class Utility:
self.api_url: str = "http://127.0.0.1:52111/lyric/search"
self.api_src: str = "DISC-HAVOC"
def _smart_lyrics_wrap(self, lyrics: str, max_length: int = 1500, single_page: bool = False, max_verses: int = 100, max_lines: int = 150) -> list[str]:
"""
Intelligently wrap lyrics to avoid breaking verses in the middle
Prioritizes keeping verses intact over page length consistency
Args:
lyrics: Raw lyrics text
max_length: Maximum character length per page (soft limit)
single_page: If True, return only the first page
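            max_verses: Maximum number of verses per page
            max_lines: Maximum number of rendered lines per page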
Returns:
List of lyrics pages
"""
if not lyrics:
return []
# Strip markdown formatting from lyrics
lyrics = discord.utils.escape_markdown(lyrics)
verses = []
current_verse: list[str] = []
# Handle both regular newlines and zero-width space newlines
lines = lyrics.replace("\u200b\n", "\n").split("\n")
empty_line_count = 0
for line in lines:
stripped_line = line.strip()
if not stripped_line or stripped_line in ["", "\u200b"]:
empty_line_count += 1
# One empty line indicates a section break (be more aggressive)
if empty_line_count >= 1 and current_verse:
verses.append("\n".join(current_verse))
current_verse = []
empty_line_count = 0
else:
empty_line_count = 0
current_verse.append(stripped_line)
# Add the last verse if it exists
if current_verse:
verses.append("\n".join(current_verse))
# If we have too few verses (verse detection failed), fallback to line-based splitting
if len(verses) <= 1:
all_lines = lyrics.split("\n")
verses = []
current_chunk = []
for line in all_lines:
current_chunk.append(line.strip())
                # Split every 8 lines to create artificial "verses"
if len(current_chunk) >= 8:
verses.append("\n".join(current_chunk))
current_chunk = []
# Add remaining lines
if current_chunk:
verses.append("\n".join(current_chunk))
if not verses:
return [lyrics[:max_length]]
# If single page requested, return first verse or truncated
if single_page:
result = verses[0]
if len(result) > max_length:
# Try to fit at least part of the first verse
lines_in_verse = result.split("\n")
truncated_lines = []
current_length = 0
for line in lines_in_verse:
if current_length + len(line) + 1 <= max_length - 3: # -3 for "..."
truncated_lines.append(line)
current_length += len(line) + 1
else:
break
result = "\n".join(truncated_lines) + "..." if truncated_lines else verses[0][:max_length-3] + "..."
return [result]
# Group complete verses into pages, never breaking a verse
# Limit by character count, verse count, AND line count for visual appeal
max_verses_per_page = max_verses
max_lines_per_page = max_lines
pages = []
current_page_verses: list[str] = []
current_page_length = 0
current_page_lines = 0
for verse in verses:
verse_length = len(verse)
# Count lines properly - handle both regular newlines and zero-width space newlines
verse_line_count = verse.count("\n") + verse.count("\u200b\n") + 1
# Calculate totals if we add this verse (including separator)
            separator_length = 3 if current_page_verses else 0  # conservative allowance for the "\n\n" joining verses
            separator_lines = 3 if current_page_verses else 0  # conservative allowance for the blank line added between verses
total_length_with_verse = current_page_length + separator_length + verse_length
total_lines_with_verse = current_page_lines + separator_lines + verse_line_count
# Check all three limits: character, verse count, and line count
exceeds_length = total_length_with_verse > max_length
exceeds_verse_count = len(current_page_verses) >= max_verses_per_page
exceeds_line_count = total_lines_with_verse > max_lines_per_page
# If adding this verse would exceed any limit AND we already have verses on the page
if (exceeds_length or exceeds_verse_count or exceeds_line_count) and current_page_verses:
# Finish current page with existing verses
pages.append("\n\n".join(current_page_verses))
current_page_verses = [verse]
current_page_length = verse_length
current_page_lines = verse_line_count
else:
# Add verse to current page
current_page_verses.append(verse)
current_page_length = total_length_with_verse
current_page_lines = total_lines_with_verse
# Add the last page if it has content
if current_page_verses:
pages.append("\n\n".join(current_page_verses))
return pages if pages else [lyrics[:max_length]]
def parse_song_input(
self, song: Optional[str] = None, activity: Optional[Activity] = None
) -> Union[bool, tuple]:
self, song: str | None = None, activity: Activity | None = None,
) -> bool | tuple:
"""
Parse Song (Sing Command) Input
@@ -36,7 +163,7 @@ class Utility:
match activity.name.lower():
case "codey toons" | "cider" | "sonixd":
search_artist: str = " ".join(
str(activity.state).strip().split(" ")[1:]
str(activity.state).strip().split(" ")[1:],
)
search_artist = regex.sub(
r"(\s{0,})(\[(spotify|tidal|sonixd|browser|yt music)])$",
@@ -51,13 +178,13 @@ class Utility:
search_song = str(activity.details)
song = f"{search_artist} : {search_song}"
case "spotify":
if not activity.title or not activity.artist: # type: ignore
if not activity.title or not activity.artist: # type: ignore[attr-defined]
"""
Attributes exist, but mypy does not recognize them. Ignored.
"""
return False
search_artist = str(activity.artist) # type: ignore
search_song = str(activity.title) # type: ignore
search_artist = str(activity.artist) # type: ignore[attr-defined]
search_song = str(activity.title) # type: ignore[attr-defined]
song = f"{search_artist} : {search_song}"
case "serious.fm" | "cocks.fm" | "something":
if not activity.details:
@@ -78,27 +205,27 @@ class Utility:
return False
search_artist = song.split(search_split_by)[0].strip()
search_song = "".join(song.split(search_split_by)[1:]).strip()
search_subsearch: Optional[str] = None
search_subsearch: str | None = None
if (
search_split_by == ":" and len(song.split(":")) > 2
): # Support sub-search if : is used (per instructions)
search_song = song.split(
search_split_by
search_split_by,
)[
1
].strip() # Reduce search_song to only the 2nd split of : [the rest is meant to be lyric text]
search_subsearch = "".join(
song.split(search_split_by)[2:]
song.split(search_split_by)[2:],
) # Lyric text from split index 2 and beyond
return (search_artist, search_song, search_subsearch)
except Exception as e:
logging.debug("Exception: %s", str(e))
logger.debug("Exception: %s", str(e))
traceback.print_exc()
return False
async def lyric_search(
self, artist: str, song: str, sub: Optional[str] = None
) -> Optional[list]:
self, artist: str, song: str, sub: str | None = None, is_spam_channel: bool = True,
) -> list | None:
"""
Lyric Search
@@ -140,7 +267,7 @@ class Utility:
return [(f"ERR: {response.get('errorText')}",)]
out_lyrics = regex.sub(
r"<br>", "\u200b\n", response.get("lyrics", "")
r"<br>", "\u200b\n", response.get("lyrics", ""),
)
response_obj: dict = {
"artist": response.get("artist"),
@@ -154,24 +281,15 @@ class Utility:
lyrics = response_obj.get("lyrics")
if not lyrics:
return None
response_obj["lyrics"] = textwrap.wrap(
text=lyrics.strip(),
width=1500,
drop_whitespace=False,
replace_whitespace=False,
break_long_words=True,
break_on_hyphens=True,
max_lines=8,
)
response_obj["lyrics_short"] = textwrap.wrap(
text=lyrics.strip(),
width=750,
drop_whitespace=False,
replace_whitespace=False,
break_long_words=True,
break_on_hyphens=True,
max_lines=1,
)
# Use different limits based on channel type
if is_spam_channel:
# Spam channels: higher limits for more content per page
response_obj["lyrics"] = self._smart_lyrics_wrap(lyrics.strip(), max_length=8000, max_verses=100, max_lines=150)
else:
# Non-spam channels: much shorter limits for better UX in regular channels
response_obj["lyrics"] = self._smart_lyrics_wrap(lyrics.strip(), max_length=2000, max_verses=15, max_lines=25)
response_obj["lyrics_short"] = self._smart_lyrics_wrap(lyrics.strip(), max_length=500, single_page=True)
return [
(
@@ -186,4 +304,4 @@ class Utility:
]
except Exception as e:
traceback.print_exc()
return [f"Retrieval failed: {str(e)}"]
return [f"Retrieval failed: {e!s}"]

uv.lock (generated new file, 1508 lines): diff suppressed because it is too large.