diff --git a/js/hang-demo/src/publish.html b/js/hang-demo/src/publish.html
index 233167419..11ec3fd81 100644
--- a/js/hang-demo/src/publish.html
+++ b/js/hang-demo/src/publish.html
@@ -22,7 +22,7 @@
Feel free to hard-code it if you have public access configured, like `url="https://relay.moq.dev/anon"`
NOTE: `http` performs an insecure certificate check. You must use `https` in production.
-->
- Tips:
- If you pay close enough attention, you'll notice that captions are being automatically generated.
+ Use <hang-publish captions> to enable caption generation.
This uses a VAD model to detect when you're speaking which is when the 🗣 emoji appears.
At the end of a sentence, we run another model to generate the caption which is available over the network.
- The `captions` attribute controls whether captions are displayed.
The AI models are executed using Transformers.js.
diff --git a/js/hang/src/publish/element.ts b/js/hang/src/publish/element.ts
index 2c0653435..b5c2e3854 100644
--- a/js/hang/src/publish/element.ts
+++ b/js/hang/src/publish/element.ts
@@ -334,6 +334,9 @@ export default class HangPublish extends HTMLElement {
const audio = effect.get(this.#audio);
if (!(audio instanceof Source.Microphone)) return;
+ const enabled = effect.get(this.broadcast.audio.enabled);
+ if (!enabled) return;
+
const devices = effect.get(audio.device.available);
if (!devices || devices.length < 2) return;
@@ -346,7 +349,9 @@ export default class HangPublish extends HTMLElement {
transform: "translateX(-50%)",
},
});
- effect.event(select, "change", () => audio.device.preferred.set(select.value));
+ effect.event(select, "change", () => {
+ audio.device.preferred.set(select.value);
+ });
for (const device of devices) {
const option = DOM.create("option", { value: device.deviceId }, device.label);
@@ -354,8 +359,8 @@ export default class HangPublish extends HTMLElement {
}
effect.effect((effect) => {
- const selected = effect.get(audio.device.selected);
- select.value = selected?.deviceId ?? "";
+ const active = effect.get(audio.device.requested);
+ select.value = active ?? "";
});
const caret = DOM.create("span", { style: { fontSize: "0.75em", cursor: "pointer" } }, "▼");
@@ -416,6 +421,9 @@ export default class HangPublish extends HTMLElement {
const video = effect.get(this.#video);
if (!(video instanceof Source.Camera)) return;
+ const enabled = effect.get(this.broadcast.video.enabled);
+ if (!enabled) return;
+
const devices = effect.get(video.device.available);
if (!devices || devices.length < 2) return;
@@ -428,7 +436,9 @@ export default class HangPublish extends HTMLElement {
transform: "translateX(-50%)",
},
});
- effect.event(select, "change", () => video.device.preferred.set(select.value));
+ effect.event(select, "change", () => {
+ video.device.preferred.set(select.value);
+ });
for (const device of devices) {
const option = DOM.create("option", { value: device.deviceId }, device.label);
@@ -436,8 +446,8 @@ export default class HangPublish extends HTMLElement {
}
effect.effect((effect) => {
- const selected = effect.get(video.device.selected);
- select.value = selected?.deviceId ?? "";
+ const requested = effect.get(video.device.requested);
+ select.value = requested ?? "";
});
const caret = DOM.create("span", { style: { fontSize: "0.75em", cursor: "pointer" } }, "▼");
@@ -520,9 +530,9 @@ export default class HangPublish extends HTMLElement {
} else if (!audio && !video) {
container.textContent = "🟡\u00A0Select Source";
} else if (!audio && video) {
- container.textContent = "🟡\u00A0Video Only";
+ container.textContent = "🟢\u00A0Video Only";
} else if (audio && !video) {
- container.textContent = "🟡\u00A0Audio Only";
+ container.textContent = "🟢\u00A0Audio Only";
} else if (audio && video) {
container.textContent = "🟢\u00A0Live";
}
diff --git a/js/hang/src/publish/source/camera.ts b/js/hang/src/publish/source/camera.ts
index 5f048c3ea..1019be0db 100644
--- a/js/hang/src/publish/source/camera.ts
+++ b/js/hang/src/publish/source/camera.ts
@@ -29,17 +29,13 @@ export class Camera {
const enabled = effect.get(this.enabled);
if (!enabled) return;
- const device = effect.get(this.device.selected);
- if (!device) return;
-
- console.log("requesting camera", device);
-
+ const device = effect.get(this.device.requested);
const constraints = effect.get(this.constraints) ?? {};
// Build final constraints with device selection
const finalConstraints: MediaTrackConstraints = {
...constraints,
- deviceId: { exact: device.deviceId },
+ deviceId: device ? { exact: device } : undefined,
};
effect.spawn(async (cancel) => {
@@ -57,9 +53,14 @@ export class Camera {
const stream = await Promise.race([media, cancel]);
if (!stream) return;
+ this.device.permission.set(true);
+
const track = stream.getVideoTracks()[0] as VideoStreamTrack | undefined;
if (!track) return;
+ const settings = track.getSettings();
+
+ effect.set(this.device.active, settings.deviceId);
effect.set(this.stream, track, undefined);
});
}
diff --git a/js/hang/src/publish/source/device.ts b/js/hang/src/publish/source/device.ts
index 8acf634b9..acc607e35 100644
--- a/js/hang/src/publish/source/device.ts
+++ b/js/hang/src/publish/source/device.ts
@@ -12,15 +12,21 @@ export class Device