television

a simple LAN/WLAN desktop public broadcasting service

commit 04501be6c062204396ff4b8d3ab2bb069c9205b6
parent bd4473e8fad6276eaad45127a23453c7e971526b
Author: ugrnm <ultrageranium@bleu255.com>
Date:   Mon, 16 Sep 2024 22:25:10 +0200

fetching and busting

Diffstat:
 M www/index.html |  2 +-
 M www/style.css  |  1 +
 M www/tv.js      | 38 ++++++++++++++++++++++++++++----------
3 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/www/index.html b/www/index.html
@@ -7,8 +7,8 @@
     <script src="/tv.js"></script>
   </head>
   <body>
-    <img id="television" src="/tv.jpg" alt="television">
     <img id="nosignal" src="/nosignal.jpg" alt="no signal">
+    <img id="television" src="/tv.jpg" alt="television">
   </body>
 </html>
diff --git a/www/style.css b/www/style.css
@@ -20,6 +20,7 @@ img {
 
 img#nosignal {
   z-index: 5;
+  visibility: hidden;
 }
 
 img#television {
diff --git a/www/tv.js b/www/tv.js
@@ -1,16 +1,35 @@
-// Fetch a new tv.jpg every second
-// In theory, fetch() combined with a "no-store" cache directive should be
-// enough, and it is fine on some browsers like chromium. However on firefox
-// even though the file is effectly re-downloaded and not put in cache,
-// the browser does not refresh the image. To force refresh it we have to
-// update its src with bogus URL crap. Great.
+// Fetch a new tv.jpg every second - a non-obvious HOWTO
 //
+// In theory, fetch() combined with a "no-store" cache directive should be
+// enough to 1. force re-download the image and 2. bypass the cache entirely.
+// However in practice, even though FF and chromium are properly re-downloading
+// the new file without using their cache, they will *not* refresh the view.
+// To force redraw the image we need to rewrite the img src with a classic
+// cache busting garbage URL trick. If we do, then FF and chromium *do* display
+// the new image, but then it means we have downloaded the file twice!
+// This is obviously super wasteful. A workaround to limit the damages, is to
+// use a different method for fetching. By default it uses GET, but if we use
+// HEAD instead, it won't download the file, it will just get the headers,
+// thus validating the response, and as follow-up function we can use
+// our traditional cache busting src rewrite. It's still two http requests, but
+// only one file download.
+//
+// Could it be done differently?
+//
 setInterval(function(){
-  fetch("/tv.jpg", { cache: "no-store", mode: "no-cors" });
-  document.getElementById("television").src="/tv.jpg#" + Date.now();
+  tv = "/tv.jpg";
+  fetch(tv, { cache: "no-store", method: "HEAD" })
+    .then((response) => {
+      document.getElementById("nosignal").style.visibility = "hidden";
+      document.getElementById("television").style.visibility = "visible";
+      document.getElementById("television").src=tv + "#" + Date.now();
+    })
+    .catch((error) => {
+      document.getElementById("television").style.visibility = "hidden";
+      document.getElementById("nosignal").style.visibility = "visible";
+    });
 }, 1000);
-
 // Press 'f' to enter fullscreen
 // Press 'f' again to exit fullscreen
 //
@@ -31,4 +50,3 @@ document.addEventListener(
   },
   false,
 );
-
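
On the "Could it be done differently?" question in the new tv.js comments: one possibility, sketched below as an assumption and not part of this commit, is to fetch the image body once as a blob and point the img at an object URL. That keeps a single GET per tick and needs no "#" cache-busting rewrite; it reuses the television/nosignal ids from index.html.

// Sketch of an alternative tv.js loop (untested assumption, not in this commit):
// one GET per tick, the blob is displayed through an object URL, so the browser
// repaints without a second download or a cache-busting fragment.
setInterval(function(){
  fetch("/tv.jpg", { cache: "no-store" })
    .then((response) => {
      if (!response.ok) throw new Error("no signal");  // treat HTTP errors like a lost feed
      return response.blob();
    })
    .then((blob) => {
      const img = document.getElementById("television");
      const previous = img.src;
      img.src = URL.createObjectURL(blob);             // a fresh blob: URL forces a redraw
      document.getElementById("nosignal").style.visibility = "hidden";
      img.style.visibility = "visible";
      if (previous.startsWith("blob:")) {
        URL.revokeObjectURL(previous);                 // free the previous frame
      }
    })
    .catch((error) => {
      document.getElementById("television").style.visibility = "hidden";
      document.getElementById("nosignal").style.visibility = "visible";
    });
}, 1000);

The trade-off is that object URLs must be revoked by hand (done above) and each tick briefly holds two frames in memory; whether that beats the HEAD-plus-busting approach used by this commit is left open.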