better 200w behavior

master
Aevann 2023-01-01 13:30:33 +02:00
parent 2b97c46912
commit b0b70d2f0f
2 changed files with 4 additions and 3 deletions


@@ -142,7 +142,7 @@ async function searchGifs(searchTerm) {
 	}
 	else {
 		for (let i = 0; i < 48; i++) {
-			gifURL[i] = "https://media.giphy.com/media/" + data[i].id + "/200w.webp";
+			gifURL[i] = "https://media.giphy.com/media/" + data[i].id + "/giphy.webp";
 			const insert = `<img class="giphy" loading="lazy" data-bs-dismiss="modal" src="${gifURL[i]}"></div>`
 			container.insertAdjacentHTML('beforeend', insert);
 			noGIFs.innerHTML = null;
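
The picker itself no longer requests the 200w rendition up front: searchGifs() now builds the full-size giphy.webp URL, and the downgrade to 200w happens later, server-side, when sanitize() rewrites data-src (see the Python hunks below). A rough sketch of the two URLs involved, using a made-up GIF id standing in for data[i].id:

    # Hypothetical id; real ids come from the Giphy search response (data[i].id).
    gif_id = "abc123"
    full_size = f"https://media.giphy.com/media/{gif_id}/giphy.webp"  # inserted by searchGifs()
    preview = f"https://media.giphy.com/media/{gif_id}/200w.webp"     # swapped into data-src by sanitize()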


@@ -313,6 +313,8 @@ def sanitize(sanitized, golden=True, limit_pings=0, showmore=True, count_marseys
 			tag = tag.replace_with(a)
 			a.append(tag)
+			tag["data-src"] = tag["data-src"].replace('/giphy.webp', '/200w.webp')
 	for tag in soup.find_all("a"):
 		if not tag.contents or not str(tag.contents[0]).strip():
 			tag.extract()
@@ -476,8 +478,7 @@ def normalize_url(url):
 		.replace("https://nitter.net/", "https://twitter.com/") \
 		.replace("https://nitter.42l.fr/", "https://twitter.com/") \
 		.replace("https://nitter.lacontrevoie.fr/", "https://twitter.com/") \
-		.replace("/giphy.gif", "/200w.webp") \
-		.replace("/giphy.webp", "/200w.webp") \
+		.replace("/giphy.gif", "/giphy.webp")
 	url = imgur_regex.sub(r'\1_d.webp?maxwidth=9999&fidelity=grand', url)
 	url = giphy_regex.sub(r'\1.webp', url)
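
Taken together, normalize_url() now keeps the full-size /giphy.webp as the canonical URL, while the new line in sanitize() downgrades only the lazily loaded preview. A minimal sketch of that data-src rewrite with BeautifulSoup, run on a stripped-down tag rather than the site's real markup (the attributes, the made-up GIF id, and the guard are simplified assumptions):

    from bs4 import BeautifulSoup

    html = '<img loading="lazy" data-src="https://media.giphy.com/media/abc123/giphy.webp">'
    soup = BeautifulSoup(html, "html.parser")

    for tag in soup.find_all("img"):
        if tag.get("data-src"):
            # Same replacement as the added sanitize() line: only the preview
            # rendition is downgraded to 200w; the stored URL keeps giphy.webp.
            tag["data-src"] = tag["data-src"].replace('/giphy.webp', '/200w.webp')

    print(soup.img["data-src"])
    # https://media.giphy.com/media/abc123/200w.webp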