Add files via upload
This commit is contained in:
297
node_modules/eris/lib/voice/Piper.js
generated
vendored
Normal file
297
node_modules/eris/lib/voice/Piper.js
generated
vendored
Normal file
@ -0,0 +1,297 @@
|
||||
"use strict";
|
||||
|
||||
const util = require("util");
|
||||
const Base = require("../structures/Base");
|
||||
const DCAOpusTransformer = require("./streams/DCAOpusTransformer");
|
||||
const FFmpegOggTransformer = require("./streams/FFmpegOggTransformer");
|
||||
const FFmpegPCMTransformer = require("./streams/FFmpegPCMTransformer");
|
||||
const FS = require("fs");
|
||||
const HTTP = require("http");
|
||||
const HTTPS = require("https");
|
||||
const OggOpusTransformer = require("./streams/OggOpusTransformer");
|
||||
const PassThroughStream = require("stream").PassThrough;
|
||||
const PCMOpusTransformer = require("./streams/PCMOpusTransformer");
|
||||
const Stream = require("stream").Stream;
|
||||
const VolumeTransformer = require("./streams/VolumeTransformer");
|
||||
const WebmOpusTransformer = require("./streams/WebmOpusTransformer");
|
||||
|
||||
let EventEmitter;
|
||||
try {
|
||||
EventEmitter = require("eventemitter3");
|
||||
} catch(err) {
|
||||
EventEmitter = require("events").EventEmitter;
|
||||
}
|
||||
|
||||
/**
 * Internal audio pipeline. Turns an arbitrary audio source (file path,
 * http(s) URL, or readable stream) into a buffered queue of Opus packets
 * that the voice connection drains with getDataPacket().
 * Emits "start", "stop", and "error".
 */
class Piper extends EventEmitter {
    /**
     * @param {String} converterCommand The ffmpeg/avconv executable used for transcoding
     * @param {Function} opusFactory Factory returning a fresh Opus encoder/decoder instance
     */
    constructor(converterCommand, opusFactory) {
        super();

        this.reset();

        this.converterCommand = converterCommand;
        this._dataPackets = [];
        // Buffer watermarks: stop pulling above max, pull eagerly below min
        this._dataPacketMax = 30;
        this._dataPacketMin = 15;
        this.encoding = false;
        this.libopus = true;

        this.opusFactory = opusFactory;
        this.opus = null;

        this.volumeLevel = 1;

        this._retransformer = [];

        // Bound once so the same reference can be attached/removed as a stream listener
        this.addDataPacket = this.addDataPacket.bind(this);
    }

    // Number of encoded packets currently buffered and ready to send
    get dataPacketCount() {
        return this._dataPackets.length;
    }

    /**
     * Buffer an encoded packet produced by the end of the pipeline.
     * While the buffer is below _dataPacketMax and the end stream supports
     * manual pulling (manualCB), request more data on the next tick.
     * @param {Buffer} packet An encoded Opus packet
     */
    addDataPacket(packet) {
        if(!this.encoding) {
            return;
        }
        if(this._dataPackets.push(packet) < this._dataPacketMax && this._endStream && this._endStream.manualCB) {
            process.nextTick(() => {
                // Re-check: stop() may have torn the pipeline down in the meantime
                if(this._endStream && this._endStream.manualCB) {
                    this._endStream.transformCB();
                }
            });
        }
    }

    /**
     * Build the transform pipeline for a source and start encoding.
     * @param {ReadableStream | String} source File path, http(s) URL, or readable stream
     * @param {Object} options Playback options (format, inlineVolume, frameSize, pcmSize,
     * frameDuration, encoderArgs, inputArgs)
     * @returns {Boolean} Whether encoding started
     */
    encode(source, options) {
        if(this.encoding || this.streams.length) {
            this.emit("error", new Error("Already encoding"));
            return false;
        }

        // Resolve string sources (URL or file path) into readable streams.
        // Unknown/null formats are handed to ffmpeg as-is, so only resolve
        // when the format is one we demux ourselves.
        if(typeof source === "string") {
            if(options.format === "dca" || options.format === "ogg" || options.format === "webm" || options.format === "pcm") {
                if(source.startsWith("http://") || source.startsWith("https://")) {
                    const passThrough = new PassThroughStream();
                    if(source.startsWith("http://")) {
                        HTTP.get(source, (res) => res.pipe(passThrough)).once("error", (e) => this.stop(e));
                    } else {
                        HTTPS.get(source, (res) => res.pipe(passThrough)).once("error", (e) => this.stop(e));
                    }
                    source = passThrough;
                } else {
                    try {
                        FS.statSync(source);
                    } catch(err) {
                        if(err.code === "ENOENT") {
                            this.emit("error", new Error("That file does not exist."));
                        } else {
                            this.emit("error", new Error("An error occured trying to access that file."));
                        }
                        this.reset();
                        return false;
                    }
                    source = FS.createReadStream(source);
                }
            }
        } else if(!(source instanceof Stream) || !source.pipe) {
            this.emit("error", new Error("Invalid source type"));
            return false;
        }

        // Restore the default watermarks (inline volume lowers them below)
        this._dataPacketMax = 30;
        this._dataPacketMin = 15;

        if(typeof source !== "string") {
            this.streams.push(source.once("error", (e) => this.stop(e)));
        }

        // Pick the demux/transcode chain for the declared format.
        if(options.format === "opusPackets") { // eslint-disable-line no-empty
            // Source already yields raw Opus packets; no transform needed.
        } else if(options.format === "dca") {
            this.streams.push(source.pipe(new DCAOpusTransformer()).once("error", (e) => this.stop(e)));
        } else if(options.format === "ogg") {
            this.streams.push(source.pipe(new OggOpusTransformer()).once("error", (e) => this.stop(e)));
        } else if(options.format === "webm") {
            this.streams.push(source.pipe(new WebmOpusTransformer()).once("error", (e) => this.stop(e)));
        } else if(!options.format || options.format === "pcm") {
            if(options.inlineVolume) {
                // Inline volume requires raw PCM; transcode first if the format is unknown.
                if(!options.format) {
                    if(!this.converterCommand) {
                        this.emit("error", new Error("FFmpeg/avconv was not found on this system. Playback of this audio format is impossible"));
                        this.reset();
                        return false;
                    }
                    if(typeof source === "string") {
                        this.streams.push(source = new FFmpegPCMTransformer({
                            command: this.converterCommand,
                            input: source,
                            encoderArgs: options.encoderArgs,
                            inputArgs: options.inputArgs
                        }).once("error", (e) => this.stop(e)));
                    } else {
                        this.streams.push(source = source.pipe(new FFmpegPCMTransformer({
                            command: this.converterCommand,
                            encoderArgs: options.encoderArgs,
                            inputArgs: options.inputArgs
                        })).once("error", (e) => this.stop(e)));
                    }
                }
                this.streams.push(this.volume = source = source.pipe(new VolumeTransformer()).once("error", (e) => this.stop(e)));
                this.volume.setVolume(this.volumeLevel);
                this.streams.push(this.volume.pipe(new PCMOpusTransformer({
                    opusFactory: this.opusFactory,
                    frameSize: options.frameSize,
                    pcmSize: options.pcmSize
                })).once("error", (e) => this.stop(e)));
                // NOTE(review): max (1) < min (4) looks inverted relative to the 30/15
                // defaults; presumably intentional to keep buffering minimal so volume
                // changes apply near-instantly — confirm against upstream eris.
                this._dataPacketMax = 1; // Live volume updating
                this._dataPacketMin = 4;
            } else {
                if(this.libopus) {
                    // ffmpeg has libopus: encode straight to Ogg/Opus and demux it.
                    if(typeof source === "string") {
                        this.streams.push(source = new FFmpegOggTransformer({
                            command: this.converterCommand,
                            input: source,
                            encoderArgs: options.encoderArgs,
                            inputArgs: options.inputArgs,
                            format: options.format,
                            frameDuration: options.frameDuration
                        }).once("error", (e) => this.stop(e)));
                    } else {
                        this.streams.push(source = source.pipe(new FFmpegOggTransformer({
                            command: this.converterCommand,
                            encoderArgs: options.encoderArgs,
                            inputArgs: options.inputArgs,
                            format: options.format,
                            frameDuration: options.frameDuration
                        })).once("error", (e) => this.stop(e)));
                    }
                    this.streams.push(source.pipe(new OggOpusTransformer()).once("error", (e) => this.stop(e)));
                } else {
                    // No libopus in ffmpeg: get PCM out and Opus-encode it ourselves.
                    if(typeof source === "string") {
                        this.streams.push(source = new FFmpegPCMTransformer({
                            command: this.converterCommand,
                            input: source,
                            encoderArgs: options.encoderArgs,
                            inputArgs: options.inputArgs
                        }).once("error", (e) => this.stop(e)));
                    } else {
                        this.streams.push(source = source.pipe(new FFmpegPCMTransformer({
                            command: this.converterCommand,
                            encoderArgs: options.encoderArgs,
                            inputArgs: options.inputArgs
                        })).once("error", (e) => this.stop(e)));
                    }
                    this.streams.push(source.pipe(new PCMOpusTransformer({
                        opusFactory: this.opusFactory,
                        frameSize: options.frameSize,
                        pcmSize: options.pcmSize
                    })).once("error", (e) => this.stop(e)));
                }
            }
        } else {
            this.emit("error", new Error("Unrecognized format"));
            this.reset();
            return false;
        }

        // The last stream in the chain is the one we harvest packets from.
        this._endStream = this.streams[this.streams.length - 1];
        if(this._endStream.hasOwnProperty("manualCB")) {
            this._endStream.manualCB = true;
        }

        this._endStream.on("data", this.addDataPacket);
        this._endStream.once("end", () => this.stop(null, source));

        this.emit("start");

        return (this.encoding = true);
    }

    /**
     * Pop the next encoded packet, pulling more from the pipeline if the
     * buffer is running low. When volume changes are queued in
     * _retransformer, the packet is decoded, rescaled sample-by-sample,
     * and re-encoded before being returned.
     * @returns {Buffer | undefined} The next Opus packet, if any
     */
    getDataPacket() {
        if(this._dataPackets.length < this._dataPacketMin && this._endStream && this._endStream.manualCB) {
            this._endStream.transformCB();
        }
        if(this._retransformer.length === 0) {
            return this._dataPackets.shift();
        } else {
            // If we don't have an opus instance yet, create one.
            if(!this.opus) {
                this.opus = this.opusFactory();
            }

            const packet = this.opus.decode(this._dataPackets.shift());
            // Scale each 16-bit little-endian sample by the queued volume factor,
            // clamping to the int16 range.
            for(let i = 0, num; i < packet.length - 1; i += 2) {
                num = ~~(this._retransformer.shift() * packet.readInt16LE(i));
                packet.writeInt16LE(num >= 32767 ? 32767 : num <= -32767 ? -32767 : num, i);
            }
            // 3840 / 2 / 2 = 960 samples — presumably one 20ms frame at 48kHz
            // stereo 16-bit; confirm against the opus codec wrapper.
            return this.opus.encode(packet, 3840 / 2 / 2);
        }
    }

    /**
     * Tear down the current pipeline, destroying (or at least unpiping)
     * every stream, and clear the end-stream/volume references.
     */
    reset() {
        if(this.streams) {
            for(const stream of this.streams) {
                if(typeof stream.destroy === "function") {
                    stream.destroy();
                } else {
                    stream.unpipe();
                }
            }
        }

        this.streams = [];
        this._endStream = null;
        this.volume = null;
    }

    /**
     * Drop all buffered packets and release the Opus instance (only needed
     * for inline-volume retransforming).
     */
    resetPackets() {
        // We no longer need this to convert inline volume, so... let it go.
        if(this.opus) {
            this.opus.delete && this.opus.delete();
            this.opus = null;
        }
        this._dataPackets = [];
    }

    /**
     * Set the playback volume. Takes effect immediately only when the
     * pipeline was built with inlineVolume (this.volume is set).
     * @param {Number} volume Volume multiplier (1 = passthrough)
     */
    setVolume(volume) {
        this.volumeLevel = volume;
        if(!this.volume) {
            return;
        }
        this.volume.setVolume(volume);
    }

    /**
     * Stop encoding and tear down the pipeline.
     * @param {Error} [e] Error to emit, if any
     * @param {Stream} [source] When given, only stop if this source still belongs
     * to the current pipeline (guards against stale "end" events)
     */
    stop(e, source) {
        if(source && !this.streams.includes(source)) {
            return;
        }

        if(e) {
            this.emit("error", e);
        }

        if(this.throttleTimeout) {
            clearTimeout(this.throttleTimeout);
            this.throttleTimeout = null;
        }

        if(this.streams.length === 0) {
            return;
        }

        if(this._endStream) {
            this._endStream.removeAllListeners("data");
        }

        this.reset();
        if(this.encoding) {
            this.encoding = false;
            this.emit("stop");
        }
    }

    [util.inspect.custom]() {
        return Base.prototype[util.inspect.custom].call(this);
    }
}

module.exports = Piper;
|
238
node_modules/eris/lib/voice/SharedStream.js
generated
vendored
Normal file
238
node_modules/eris/lib/voice/SharedStream.js
generated
vendored
Normal file
@ -0,0 +1,238 @@
|
||||
"use strict";
|
||||
|
||||
const util = require("util");
|
||||
const Base = require("../structures/Base");
|
||||
const Piper = require("./Piper");
|
||||
const VoiceConnection = require("./VoiceConnection");
|
||||
const Collection = require("../util/Collection");
|
||||
const {createOpus} = require("../util/Opus");
|
||||
|
||||
let EventEmitter;
|
||||
try {
|
||||
EventEmitter = require("eventemitter3");
|
||||
} catch(err) {
|
||||
EventEmitter = require("events").EventEmitter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a collection of VoiceConnections sharing an input stream
|
||||
* @extends EventEmitter
|
||||
* @prop {Object?} current The current stream
|
||||
* @prop {Boolean} ended Whether the stream ended
|
||||
* @prop {Boolean} playing Whether the voice connection is playing something
|
||||
* @prop {Boolean} speaking Whether someone is speaking
|
||||
* @prop {Number} volume The current volume level of the connection
|
||||
*/
|
||||
class SharedStream extends EventEmitter {
    constructor() {
        super();

        // Fixed audio parameters shared by every attached connection
        this.samplingRate = 48000;
        this.frameDuration = 20;
        this.channels = 2;
        this.bitrate = 64000;

        this.voiceConnections = new Collection(VoiceConnection);

        // Locate ffmpeg/avconv once, lazily, for the whole process
        if(!VoiceConnection._converterCommand.cmd) {
            VoiceConnection._converterCommand.pickCommand();
        }

        this.piper = new Piper(VoiceConnection._converterCommand.cmd, () => createOpus(this.samplingRate, this.channels, this.bitrate));
        /**
         * Fired when the shared stream encounters an error
         * @event SharedStream#error
         * @prop {Error} e The error
         */
        this.piper.on("error", (e) => this.emit("error", e));
        if(!VoiceConnection._converterCommand.libopus) {
            this.piper.libopus = false;
        }

        this.ended = true;
        this.playing = false;
        this.speaking = false;

        // Bound once so setTimeout can reschedule it without re-binding
        this._send = this._send.bind(this);
    }

    // Current volume level, delegated to the Piper
    get volume() {
        return this.piper.volumeLevel;
    }

    /**
     * Add a voice connection to the shared stream
     * @arg {VoiceConnection} connection The voice connection to add
     * @returns {VoiceConnection} The connection stored in the collection
     */
    add(connection) {
        const _connection = this.voiceConnections.add(connection);
        // Sync the new connection's speaking flag now, or as soon as it's ready
        if(_connection.ready) {
            _connection.setSpeaking(this.speaking);
        } else {
            _connection.once("ready", () => {
                _connection.setSpeaking(this.speaking);
            });
        }
        return _connection;
    }

    /**
     * Play an audio or video resource. If playing from a non-opus resource, FFMPEG should be compiled with --enable-libopus for best performance. If playing from HTTPS, FFMPEG must be compiled with --enable-openssl
     * @arg {ReadableStream | String} resource The audio or video resource, either a ReadableStream, URL, or file path
     * @arg {Object} [options] Music options
     * @arg {Array<String>} [options.encoderArgs] Additional encoder parameters to pass to ffmpeg/avconv (after -i)
     * @arg {String} [options.format] The format of the resource. If null, FFmpeg will attempt to guess and play the format. Available options: "dca", "ogg", "webm", "pcm", null
     * @arg {Number} [options.frameDuration=60] The resource opus frame duration (required for DCA/Ogg)
     * @arg {Number} [options.frameSize=2880] The resource opus frame size
     * @arg {Boolean} [options.inlineVolume=false] Whether to enable on-the-fly volume changing. Note that enabling this leads to increased CPU usage
     * @arg {Array<String>} [options.inputArgs] Additional input parameters to pass to ffmpeg/avconv (before -i)
     * @arg {Number} [options.sampleRate=48000] The resource audio sampling rate
     * @arg {Number} [options.voiceDataTimeout=2000] Timeout when waiting for voice data (-1 for no timeout)
     */
    play(source, options = {}) {
        // Normalize options, deriving frame/pcm sizes from rate and duration
        options.format = options.format || null;
        options.voiceDataTimeout = !isNaN(options.voiceDataTimeout) ? options.voiceDataTimeout : 2000;
        options.inlineVolume = !!options.inlineVolume;
        options.inputArgs = options.inputArgs || [];
        options.encoderArgs = options.encoderArgs || [];

        options.samplingRate = options.samplingRate || this.samplingRate;
        options.frameDuration = options.frameDuration || this.frameDuration;
        options.frameSize = options.frameSize || options.samplingRate * options.frameDuration / 1000;
        options.pcmSize = options.pcmSize || options.frameSize * 2 * this.channels;

        if(!this.piper.encode(source, options)) {
            this.emit("error", new Error("Unable to encode source"));
            return;
        }

        this.ended = false;
        // Per-stream playback state used by the _send scheduling loop
        this.current = {
            startTime: 0, // later
            playTime: 0,
            pausedTimestamp: 0,
            pausedTime: 0,
            bufferingTicks: 0,
            options: options,
            timeout: null,
            buffer: null
        };

        this.playing = true;

        /**
         * Fired when the shared stream starts playing a stream
         * @event SharedStream#start
         */
        this.emit("start");

        this._send();
    }

    /**
     * Remove a voice connection from the shared stream
     * @arg {VoiceConnection} connection The voice connection to remove
     * @returns {VoiceConnection} The removed connection
     */
    remove(connection) {
        return this.voiceConnections.remove(connection);
    }

    /**
     * Propagate a speaking-state change to every attached connection
     * (no-op if the state is unchanged).
     * @arg {Boolean} value Whether we are speaking
     */
    setSpeaking(value) {
        if((value = !!value) != this.speaking) {
            this.speaking = value;
            for(const vc of this.voiceConnections.values()) {
                vc.setSpeaking(value);
            }
        }
    }

    /**
     * Sets the volume of this shared stream if inline volume is enabled
     * @arg {Number} volume The volume as a value from 0 (min) to 1 (max)
     */
    setVolume(volume) {
        this.piper.setVolume(volume);
    }

    /**
     * Stop the bot from sending audio
     */
    stopPlaying() {
        if(this.ended) {
            return;
        }
        this.ended = true;
        // Cancel the pending _send tick, if any
        if(this.current && this.current.timeout) {
            clearTimeout(this.current.timeout);
            this.current.timeout = null;
        }
        this.current = null;
        this.piper.stop();
        this.piper.resetPackets();

        this.setSpeaking(this.playing = false);

        /**
         * Fired when the shared stream finishes playing a stream
         * @event SharedStream#end
         */
        this.emit("end");
    }

    // Advance every connection's RTP sequence number (wraps at 16 bits)
    _incrementSequences() {
        for(const vc of this.voiceConnections.values()) {
            vc.sequence = (vc.sequence + 1) & 0xFFFF;
        }
    }

    // Advance every connection's RTP timestamp by val (unsigned 32-bit wrap)
    _incrementTimestamps(val) {
        for(const vc of this.voiceConnections.values()) {
            vc.timestamp = (vc.timestamp + val) >>> 0;
        }
    }

    /**
     * Self-rescheduling send loop: fetch one packet per frame and fan it out
     * to all ready connections, pausing (as buffering ticks) while the
     * pipeline has no data, and stopping on timeout or stream end.
     */
    _send() {
        // Pipeline finished and drained: we're done
        if(!this.piper.encoding && this.piper.dataPacketCount === 0) {
            return this.stopPlaying();
        }

        this._incrementTimestamps(this.current.options.frameSize);

        this._incrementSequences();

        if((this.current.buffer = this.piper.getDataPacket())) {
            if(this.current.startTime === 0) {
                this.current.startTime = Date.now();
            }
            // Data resumed after a buffering stall
            if(this.current.bufferingTicks > 0) {
                this.current.bufferingTicks = 0;
                this.setSpeaking(true);
            }
        } else if(this.current.options.voiceDataTimeout === -1 || this.current.bufferingTicks < this.current.options.voiceDataTimeout / (4 * this.current.options.frameDuration)) { // wait for data
            if(++this.current.bufferingTicks === 1) {
                this.setSpeaking(false);
            } else {
                // Back off for 4 frame durations before retrying
                this.current.pausedTime += 4 * this.current.options.frameDuration;
                this._incrementTimestamps(3 * this.current.options.frameSize);
                this.current.timeout = setTimeout(this._send, 4 * this.current.options.frameDuration);
                return;
            }
        } else {
            // Waited longer than voiceDataTimeout without data
            return this.stopPlaying();
        }

        this.voiceConnections.forEach((connection) => {
            if(connection.ready && this.current.buffer) {
                connection._sendAudioFrame(this.current.buffer);
            }
        });
        this.current.playTime += this.current.options.frameDuration;
        // Schedule the next frame relative to wall-clock start to avoid drift
        this.current.timeout = setTimeout(this._send, this.current.startTime + this.current.pausedTime + this.current.playTime - Date.now());
    }

    [util.inspect.custom]() {
        return Base.prototype[util.inspect.custom].call(this);
    }
}

module.exports = SharedStream;
|
862
node_modules/eris/lib/voice/VoiceConnection.js
generated
vendored
Normal file
862
node_modules/eris/lib/voice/VoiceConnection.js
generated
vendored
Normal file
@ -0,0 +1,862 @@
|
||||
"use strict";
|
||||
|
||||
const util = require("util");
|
||||
const Base = require("../structures/Base");
|
||||
const ChildProcess = require("child_process");
|
||||
const {VoiceOPCodes, GatewayOPCodes} = require("../Constants");
|
||||
const Dgram = require("dgram");
|
||||
const Net = require("net");
|
||||
const Piper = require("./Piper");
|
||||
const VoiceDataStream = require("./VoiceDataStream");
|
||||
const {createOpus} = require("../util/Opus");
|
||||
|
||||
const WebSocket = typeof window !== "undefined" ? require("../util/BrowserWebSocket") : require("ws");
|
||||
|
||||
let EventEmitter;
|
||||
try {
|
||||
EventEmitter = require("eventemitter3");
|
||||
} catch(err) {
|
||||
EventEmitter = require("events").EventEmitter;
|
||||
}
|
||||
|
||||
let Sodium = null;
|
||||
let NaCl = null;
|
||||
|
||||
const ENCRYPTION_MODE = "xsalsa20_poly1305";
|
||||
const MAX_FRAME_SIZE = 1276 * 3;
|
||||
const SILENCE_FRAME = Buffer.from([0xF8, 0xFF, 0xFE]);
|
||||
|
||||
const converterCommand = {
|
||||
cmd: null,
|
||||
libopus: false
|
||||
};
|
||||
|
||||
/**
 * Probe for an ffmpeg/avconv executable (local directory first, then PATH)
 * and record the first usable one on converterCommand. A candidate whose
 * encoder list includes libopus wins outright; otherwise the last candidate
 * that at least ran successfully is kept as a fallback (libopus stays false).
 */
converterCommand.pickCommand = function pickCommand() {
    let fallback;
    for(const candidate of ["./ffmpeg", "./avconv", "ffmpeg", "avconv"]) {
        const probe = ChildProcess.spawnSync(candidate, ["-encoders"]);
        if(probe.error) {
            continue;
        }
        if(probe.stdout.toString().includes("libopus")) {
            // Best case: native Opus encoding is available
            converterCommand.cmd = candidate;
            converterCommand.libopus = true;
            return;
        }
        fallback = candidate;
    }
    if(fallback) {
        converterCommand.cmd = fallback;
    }
};
|
||||
|
||||
/**
|
||||
* Represents a voice connection
|
||||
* @extends EventEmitter
|
||||
* @prop {String} channelID The ID of the voice connection's current channel
|
||||
* @prop {Boolean} connecting Whether the voice connection is connecting
|
||||
* @prop {Object?} current The state of the currently playing stream
|
||||
* @prop {Object} current.options The custom options for the current stream
|
||||
* @prop {Array<String>?} current.options.encoderArgs Additional encoder parameters to pass to ffmpeg/avconv (after -i)
|
||||
* @prop {String?} current.options.format The format of the resource. If null, FFmpeg will attempt to guess and play the format. Available options: "dca", "ogg", "webm", "pcm", null
|
||||
* @prop {Number?} current.options.frameDuration The resource opus frame duration (required for DCA/Ogg)
|
||||
* @prop {Number?} current.options.frameSize The resource opus frame size
|
||||
* @prop {Boolean?} current.options.inlineVolume Whether to enable on-the-fly volume changing. Note that enabling this leads to increased CPU usage
|
||||
* @prop {Array<String>?} current.options.inputArgs Additional input parameters to pass to ffmpeg/avconv (before -i)
|
||||
* @prop {Number?} current.options.sampleRate The resource audio sampling rate
|
||||
* @prop {Number?} current.options.voiceDataTimeout Timeout when waiting for voice data (-1 for no timeout)
|
||||
* @prop {Number} current.pausedTime How long the current stream has been paused for, in milliseconds
|
||||
* @prop {Number} current.pausedTimestamp The timestamp of the most recent pause
|
||||
* @prop {Number} current.playTime How long the current stream has been playing for, in milliseconds
|
||||
* @prop {Number} current.startTime The timestamp of the start of the current stream
|
||||
* @prop {String} id The ID of the voice connection (guild ID)
|
||||
* @prop {Boolean} paused Whether the voice connection is paused
|
||||
* @prop {Boolean} playing Whether the voice connection is playing something
|
||||
* @prop {Boolean} ready Whether the voice connection is ready
|
||||
* @prop {Number} volume The current volume level of the connection
|
||||
*/
|
||||
class VoiceConnection extends EventEmitter {
|
||||
    /**
     * Set up the voice connection state, crypto backend, and (unless shared)
     * the audio pipeline.
     * @param {String} id Guild ID (or "call")
     * @param {Object} [options]
     * @param {Boolean} [options.shared] Whether audio is fed by a SharedStream (skips Piper setup)
     * @param {Object} [options.shard] The shard owning this connection
     * @param {Boolean} [options.opusOnly] Skip per-user Opus decoder bookkeeping
     */
    constructor(id, options = {}) {
        super();

        if(typeof window !== "undefined") {
            throw new Error("Voice is not supported in browsers at this time");
        }

        // Lazily load a crypto backend once per process: prefer sodium-native,
        // fall back to tweetnacl.
        if(!Sodium && !NaCl) {
            try {
                Sodium = require("sodium-native");
            } catch(err) {
                try {
                    NaCl = require("tweetnacl");
                } catch(err) { // eslint-disable-line no-empty
                    throw new Error("Error loading tweetnacl/libsodium, voice not available");
                }
            }
        }

        this.id = id;
        // Fixed audio parameters; pcmSize = frameSize samples * channels * 2 bytes
        this.samplingRate = 48000;
        this.channels = 2;
        this.frameDuration = 20;
        this.frameSize = this.samplingRate * this.frameDuration / 1000;
        this.pcmSize = this.frameSize * this.channels * 2;
        this.bitrate = 64000;
        this.shared = !!options.shared;
        this.shard = options.shard || {};
        this.opusOnly = !!options.opusOnly;

        // Per-user Opus decoders for voice receive (keyed by user ID)
        if(!this.opusOnly && !this.shared) {
            this.opus = {};
        }

        this.channelID = null;
        this.paused = true;
        this.speaking = false;
        this.sequence = 0;
        this.timestamp = 0;
        this.ssrcUserMap = {};
        this.connectionTimeout = null;
        this.connecting = false;
        this.reconnecting = false;
        this.ready = false;

        // Reusable RTP send buffer and nonce; 0x80/0x78 are presumably the
        // RTP version/payload-type header bytes — confirm against the RTP spec.
        this.sendBuffer = Buffer.allocUnsafe(16 + 32 + MAX_FRAME_SIZE);
        this.sendNonce = Buffer.alloc(24);
        this.sendNonce[0] = 0x80;
        this.sendNonce[1] = 0x78;

        if(!options.shared) {
            if(!converterCommand.cmd) {
                converterCommand.pickCommand();
            }

            this.piper = new Piper(converterCommand.cmd, () => createOpus(this.samplingRate, this.channels, this.bitrate));
            /**
             * Fired when the voice connection encounters an error. This event should be handled by users
             * @event VoiceConnection#error
             * @prop {Error} err The error object
             */
            this.piper.on("error", (e) => this.emit("error", e));
            if(!converterCommand.libopus) {
                this.piper.libopus = false;
            }
        }

        // Bound once so the send loop can reschedule itself via setTimeout
        this._send = this._send.bind(this);
    }
|
||||
|
||||
get volume() {
|
||||
return this.piper.volumeLevel;
|
||||
}
|
||||
|
||||
    /**
     * Establish the voice websocket (and, via its READY flow, the UDP socket)
     * from a voice server update payload.
     * @param {Object} data Voice server update (endpoint, token, session_id,
     * user_id, channel_id)
     */
    connect(data) {
        this.connecting = true;
        // An old socket is still open/closing: tear it down and retry shortly
        if(this.ws && this.ws.readyState !== WebSocket.CLOSED) {
            this.disconnect(undefined, true);
            setTimeout(() => {
                if(!this.connecting && !this.ready) {
                    this.connect(data);
                }
            }, 500).unref();
            return;
        }
        clearTimeout(this.connectionTimeout);
        this.connectionTimeout = setTimeout(() => {
            if(this.connecting) {
                this.disconnect(new Error("Voice connection timeout"));
            }
            this.connectionTimeout = null;
        }, this.shard.client ? this.shard.client.options.connectionTimeout : 30000).unref();
        if(!data.endpoint) {
            return; // Endpoint null, wait next update.
        }
        if(!data.token || !data.session_id || !data.user_id) {
            this.disconnect(new Error("Malformed voice server update: " + JSON.stringify(data)));
            return;
        }
        this.channelID = data.channel_id;
        this.endpoint = new URL(`wss://${data.endpoint}`);
        if(this.endpoint.port === "80") {
            this.endpoint.port = "";
        }
        this.endpoint.searchParams.set("v", 4);
        this.ws = new WebSocket(this.endpoint.href);
        /**
         * Fired when stuff happens and gives more info
         * @event VoiceConnection#debug
         * @prop {String} message The debug message
         */
        this.emit("debug", "Connection: " + JSON.stringify(data));
        this.ws.on("open", () => {
            /**
             * Fired when the voice connection connects
             * @event VoiceConnection#connect
             */
            this.emit("connect");
            if(this.connectionTimeout) {
                clearTimeout(this.connectionTimeout);
                this.connectionTimeout = null;
            }
            this.sendWS(VoiceOPCodes.IDENTIFY, {
                server_id: this.id === "call" ? data.channel_id : this.id,
                user_id: data.user_id,
                session_id: data.session_id,
                token: data.token
            });
        });
        this.ws.on("message", (m) => {
            const packet = JSON.parse(m);
            if(this.listeners("debug").length > 0) {
                this.emit("debug", "Rec: " + JSON.stringify(packet));
            }
            switch(packet.op) {
                case VoiceOPCodes.READY: {
                    // Server assigned our SSRC; begin UDP IP discovery
                    this.ssrc = packet.d.ssrc;
                    this.sendNonce.writeUInt32BE(this.ssrc, 8);
                    if(!packet.d.modes.includes(ENCRYPTION_MODE)) {
                        throw new Error("No supported voice mode found");
                    }

                    this.modes = packet.d.modes;

                    this.udpIP = packet.d.ip;
                    this.udpPort = packet.d.port;

                    this.emit("debug", "Connecting to UDP: " + this.udpIP + ":" + this.udpPort);

                    this.udpSocket = Dgram.createSocket(Net.isIPv6(this.udpIP) ? "udp6" : "udp4");
                    this.udpSocket.on("error", (err, msg) => {
                        this.emit("error", err);
                        if(msg) {
                            this.emit("debug", "Voice UDP error: " + msg);
                        }
                        if(this.ready || this.connecting) {
                            this.disconnect(err);
                        }
                    });
                    // First UDP reply carries our external IP/port (IP discovery):
                    // NUL-terminated ASCII address starting at byte 8, port in the
                    // last two bytes.
                    this.udpSocket.once("message", (packet) => {
                        let i = 8;
                        while(packet[i] !== 0) {
                            i++;
                        }
                        const localIP = packet.toString("ascii", 8, i);
                        const localPort = packet.readUInt16BE(packet.length - 2);
                        this.emit("debug", `Discovered IP: ${localIP}:${localPort} (${packet.toString("hex")})`);

                        this.sendWS(VoiceOPCodes.SELECT_PROTOCOL, {
                            protocol: "udp",
                            data: {
                                address: localIP,
                                port: localPort,
                                mode: ENCRYPTION_MODE
                            }
                        });
                    });
                    this.udpSocket.on("close", (err) => {
                        if(err) {
                            this.emit("warn", "Voice UDP close: " + err);
                        }
                        if(this.ready || this.connecting) {
                            this.disconnect(err);
                        }
                    });
                    // IP discovery request: type 0x1, length 70, then our SSRC
                    const udpMessage = Buffer.allocUnsafe(74);
                    udpMessage.writeUInt16BE(0x1, 0);
                    udpMessage.writeUInt16BE(70, 2);
                    udpMessage.writeUInt32BE(this.ssrc, 4);
                    this.sendUDPPacket(udpMessage);
                    break;
                }
                case VoiceOPCodes.SESSION_DESCRIPTION: {
                    this.mode = packet.d.mode;
                    this.secret = Buffer.from(packet.d.secret_key);
                    this.connecting = false;
                    this.reconnecting = false;
                    this.ready = true;
                    // Send audio to properly establish the socket (e.g. for voice receive)
                    this.sendAudioFrame(SILENCE_FRAME, this.frameSize);
                    /**
                     * Fired when the voice connection turns ready
                     * @event VoiceConnection#ready
                     */
                    this.emit("ready");
                    this.resume();
                    if(this.receiveStreamOpus || this.receiveStreamPCM) {
                        this.registerReceiveEventHandler();
                    }
                    break;
                }
                case VoiceOPCodes.HEARTBEAT_ACK: {
                    /**
                     * Fired when the voice connection receives a pong
                     * @event VoiceConnection#pong
                     * @prop {Number} latency The current latency in milliseconds
                     */
                    this.emit("pong", Date.now() - packet.d);
                    break;
                }
                case VoiceOPCodes.SPEAKING: {
                    // Track which user owns this SSRC for voice receive
                    this.ssrcUserMap[packet.d.ssrc] = packet.d.user_id;
                    /**
                     * Fired when a user begins speaking
                     * @event VoiceConnection#speakingStart
                     * @prop {String} userID The ID of the user that began speaking
                     */
                    /**
                     * Fired when a user stops speaking
                     * @event VoiceConnection#speakingStop
                     * @prop {String} userID The ID of the user that stopped speaking
                     */
                    this.emit(packet.d.speaking ? "speakingStart" : "speakingStop", packet.d.user_id);
                    break;
                }
                case VoiceOPCodes.HELLO: {
                    if(this.heartbeatInterval) {
                        clearInterval(this.heartbeatInterval);
                    }
                    this.heartbeatInterval = setInterval(() => {
                        this.heartbeat();
                    }, packet.d.heartbeat_interval);

                    this.heartbeat();
                    break;
                }
                case VoiceOPCodes.CLIENT_DISCONNECT: {
                    if(this.opus) {
                        // opusscript requires manual cleanup
                        if(this.opus[packet.d.user_id] && this.opus[packet.d.user_id].delete) {
                            this.opus[packet.d.user_id].delete();
                        }

                        delete this.opus[packet.d.user_id];
                    }

                    /**
                     * Fired when a user disconnects from the voice server
                     * @event VoiceConnection#userDisconnect
                     * @prop {String} userID The ID of the user that disconnected
                     */
                    this.emit("userDisconnect", packet.d.user_id);
                    break;
                }
                default: {
                    this.emit("unknown", packet);
                    break;
                }
            }
        });
        this.ws.on("error", (err) => {
            this.emit("error", err);
        });
        this.ws.on("close", (code, reason) => {
            let err = !code || code === 1000 ? null : new Error(code + ": " + reason);
            this.emit("warn", `Voice WS close ${code}: ${reason}`);
            if(this.connecting || this.ready) {
                let reconnecting = true;
                if(code === 4006) {
                    // Session no longer valid: caller must re-join
                    reconnecting = false;
                } else if(code === 4014) {
                    // Disconnected (e.g. moved/kicked); retry only while still in a channel
                    if(this.channelID) {
                        data.endpoint = null;
                        reconnecting = true;
                        err = null;
                    } else {
                        reconnecting = false;
                    }
                } else if(code === 1000) {
                    reconnecting = false;
                }
                this.disconnect(err, reconnecting);
                if(reconnecting) {
                    setTimeout(() => {
                        if(!this.connecting && !this.ready) {
                            this.connect(data);
                        }
                    }, 500).unref();
                }
            }
        });
    }
|
||||
|
||||
// Tear down the voice session. When `reconnecting` is truthy, playback state is
// kept (paused) and no "disconnect" event fires; otherwise everything is reset.
disconnect(error, reconnecting) {
    this.connecting = false;
    this.reconnecting = reconnecting;
    this.ready = false;
    this.speaking = false;
    this.timestamp = 0;
    this.sequence = 0;

    if(this.connectionTimeout) {
        clearTimeout(this.connectionTimeout);
        this.connectionTimeout = null;
    }

    try {
        if(reconnecting) {
            // Keep the current stream so it can resume after reconnecting
            this.pause();
        } else {
            this.stopPlaying();
        }
    } catch(err) {
        this.emit("error", err);
    }
    if(this.heartbeatInterval) {
        clearInterval(this.heartbeatInterval);
        this.heartbeatInterval = null;
    }
    if(this.udpSocket) {
        try {
            this.udpSocket.close();
        } catch(err) {
            // "Not running" just means the socket was already closed
            if(err.message !== "Not running") {
                this.emit("error", err);
            }
        }
        this.udpSocket = null;
    }
    if(this.ws) {
        try {
            if(reconnecting) {
                if(this.ws.readyState === WebSocket.OPEN) {
                    // Non-1000 close code so the voice server keeps the session
                    this.ws.close(4901, "Eris: reconnect");
                } else {
                    this.emit("debug", `Terminating websocket (state: ${this.ws.readyState})`);
                    this.ws.terminate();
                }
            } else {
                this.ws.close(1000, "Eris: normal");
            }
        } catch(err) {
            this.emit("error", err);
        }
        this.ws = null;
    }
    if(reconnecting) {
        if(error) {
            this.emit("error", error);
        }
    } else {
        // Full disconnect: leave the channel and notify listeners
        this.channelID = null;
        this.updateVoiceState();
        /**
        * Fired when the voice connection disconnects
        * @event VoiceConnection#disconnect
        * @prop {Error?} error The error, if any
        */
        this.emit("disconnect", error);
    }
}
|
||||
|
||||
heartbeat() {
|
||||
this.sendWS(VoiceOPCodes.HEARTBEAT, Date.now());
|
||||
if(this.udpSocket) {
|
||||
// NAT/connection table keep-alive
|
||||
const udpMessage = Buffer.from([0x80, 0xC8, 0x0, 0x0]);
|
||||
this.sendUDPPacket(udpMessage);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pause sending audio (if playing)
|
||||
*/
|
||||
pause() {
|
||||
this.paused = true;
|
||||
this.setSpeaking(0);
|
||||
if(this.current) {
|
||||
if(!this.current.pausedTimestamp) {
|
||||
this.current.pausedTimestamp = Date.now();
|
||||
}
|
||||
if(this.current.timeout) {
|
||||
clearTimeout(this.current.timeout);
|
||||
this.current.timeout = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
* Play an audio or video resource. If playing from a non-opus resource, FFMPEG should be compiled with --enable-libopus for best performance. If playing from HTTPS, FFMPEG must be compiled with --enable-openssl
* @arg {ReadableStream | String} resource The audio or video resource, either a ReadableStream, URL, or file path
* @arg {Object} [options] Music options
* @arg {Array<String>} [options.encoderArgs] Additional encoder parameters to pass to ffmpeg/avconv (after -i)
* @arg {String} [options.format] The format of the resource. If null, FFmpeg will attempt to guess and play the format. Available options: "dca", "ogg", "webm", "pcm", null
* @arg {Number} [options.frameDuration=20] The resource opus frame duration (required for DCA/Ogg)
* @arg {Number} [options.frameSize=2880] The resource opus frame size
* @arg {Boolean} [options.inlineVolume=false] Whether to enable on-the-fly volume changing. Note that enabling this leads to increased CPU usage
* @arg {Array<String>} [options.inputArgs] Additional input parameters to pass to ffmpeg/avconv (before -i)
* @arg {Number} [options.pcmSize=options.frameSize*2*this.channels] The PCM size if the "pcm" format is used
* @arg {Number} [options.samplingRate=48000] The resource audio sampling rate
* @arg {Number} [options.voiceDataTimeout=2000] Timeout when waiting for voice data (-1 for no timeout)
*/
play(source, options = {}) {
    if(this.shared) {
        throw new Error("Cannot play stream on shared voice connection");
    }
    if(!this.ready) {
        throw new Error("Not ready yet");
    }

    // Normalize/default the options in place (the same object is handed to
    // the encoder pipeline and kept on this.current)
    options.format = options.format || null;
    options.voiceDataTimeout = !isNaN(options.voiceDataTimeout) ? options.voiceDataTimeout : 2000;
    options.inlineVolume = !!options.inlineVolume;
    options.inputArgs = options.inputArgs || [];
    options.encoderArgs = options.encoderArgs || [];

    options.samplingRate = options.samplingRate || this.samplingRate;
    options.frameDuration = options.frameDuration || this.frameDuration;
    // samples per frame = sampling rate (samples/s) * frame duration (ms) / 1000
    options.frameSize = options.frameSize || options.samplingRate * options.frameDuration / 1000;
    // PCM bytes per frame: 2 bytes per sample per channel
    options.pcmSize = options.pcmSize || options.frameSize * 2 * this.channels;

    // Hand the source to the encoder pipeline; it buffers opus packets for _send()
    if(!this.piper.encode(source, options)) {
        this.emit("error", new Error("Unable to encode source"));
        return;
    }

    this.ended = false;
    // Playback bookkeeping used by the frame scheduler (_send)
    this.current = {
        startTime: 0, // later
        playTime: 0,
        pausedTimestamp: 0,
        pausedTime: 0,
        bufferingTicks: 0,
        options: options,
        timeout: null,
        buffer: null
    };

    this.playing = true;

    /**
    * Fired when the voice connection starts playing a stream
    * @event VoiceConnection#start
    */
    this.emit("start");

    this._send();
}
|
||||
|
||||
/**
|
||||
* Generate a receive stream for the voice connection.
|
||||
* @arg {String} [type="pcm"] The desired voice data type, either "opus" or "pcm"
|
||||
* @returns {VoiceDataStream}
|
||||
*/
|
||||
receive(type) {
|
||||
if(type === "pcm") {
|
||||
if(!this.receiveStreamPCM) {
|
||||
this.receiveStreamPCM = new VoiceDataStream(type);
|
||||
if(!this.receiveStreamOpus) {
|
||||
this.registerReceiveEventHandler();
|
||||
}
|
||||
}
|
||||
} else if(type === "opus") {
|
||||
if(!this.receiveStreamOpus) {
|
||||
this.receiveStreamOpus = new VoiceDataStream(type);
|
||||
if(!this.receiveStreamPCM) {
|
||||
this.registerReceiveEventHandler();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throw new Error(`Unsupported voice data type: ${type}`);
|
||||
}
|
||||
return type === "pcm" ? this.receiveStreamPCM : this.receiveStreamOpus;
|
||||
}
|
||||
|
||||
// Attaches the UDP "message" listener that decrypts incoming RTP voice packets
// and fans the audio out to the opus and/or pcm receive streams.
registerReceiveEventHandler() {
    this.udpSocket.on("message", (msg) => {
        if(msg[1] !== 0x78) { // unknown payload type, ignore
            return;
        }

        // secretbox nonce = the 12-byte RTP header, zero-padded to 24 bytes
        const nonce = Buffer.alloc(24);
        msg.copy(nonce, 0, 0, 12);
        let data;
        if(Sodium) {
            data = Buffer.alloc(msg.length - 12 - Sodium.crypto_secretbox_MACBYTES);
            Sodium.crypto_secretbox_open_easy(data, msg.subarray(12), nonce, this.secret);
        } else {
            // tweetnacl returns null on authentication failure
            if(!(data = NaCl.secretbox.open(msg.subarray(12), nonce, this.secret))) {
                /**
                * Fired to warn of something weird but non-breaking happening
                * @event VoiceConnection#warn
                * @prop {String} message The warning message
                */
                this.emit("warn", "Failed to decrypt received packet");
                return;
            }
        }
        // First RTP header byte: X (extension) bit and CC (CSRC count) field
        const hasExtension = !!(msg[0] & 0b10000);
        const cc = msg[0] & 0b1111;
        if(cc > 0) {
            // Skip the CSRC identifiers (4 bytes each)
            data = data.subarray(cc * 4);
        }
        // Not a RFC5285 One Byte Header Extension (not negotiated)
        if(hasExtension) { // RFC3550 5.3.1: RTP Header Extension
            // Extension length (in 32-bit words) follows the 2-byte profile field
            const l = data[2] << 8 | data[3];
            data = data.subarray(4 + l * 4);
        }
        if(this.receiveStreamOpus) {
            /**
            * Fired when a voice data packet is received
            * @event VoiceDataStream#data
            * @prop {Buffer} data The voice data
            * @prop {String} userID The user who sent the voice packet
            * @prop {Number} timestamp The intended timestamp of the packet
            * @prop {Number} sequence The intended sequence number of the packet
            */
            this.receiveStreamOpus.emit("data", data, this.ssrcUserMap[nonce.readUIntBE(8, 4)], nonce.readUIntBE(4, 4), nonce.readUIntBE(2, 2));
        }
        if(this.receiveStreamPCM) {
            // SSRC (bytes 8-11 of the RTP header) maps to the speaking user
            const userID = this.ssrcUserMap[nonce.readUIntBE(8, 4)];
            // Lazily create a per-user opus decoder
            if(!this.opus[userID]) {
                this.opus[userID] = createOpus(this.samplingRate, this.channels, this.bitrate);
            }

            data = this.opus[userID].decode(data, this.frameSize);
            if(!data) {
                return this.emit("warn", "Failed to decode received packet");
            }
            this.receiveStreamPCM.emit("data", data, userID, nonce.readUIntBE(4, 4), nonce.readUIntBE(2, 2));
        }
    });
}
|
||||
|
||||
/**
|
||||
* Resume sending audio (if paused)
|
||||
*/
|
||||
resume() {
|
||||
this.paused = false;
|
||||
if(this.current) {
|
||||
this.setSpeaking(1);
|
||||
if(this.current.pausedTimestamp) {
|
||||
this.current.pausedTime += Date.now() - this.current.pausedTimestamp;
|
||||
this.current.pausedTimestamp = 0;
|
||||
}
|
||||
this._send();
|
||||
} else {
|
||||
this.setSpeaking(0);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a packet containing an Opus audio frame
|
||||
* @arg {Buffer} frame The Opus audio frame
|
||||
* @arg {Number} [frameSize] The size (in samples) of the Opus audio frame
|
||||
*/
|
||||
sendAudioFrame(frame, frameSize = this.frameSize) {
|
||||
this.timestamp = (this.timestamp + frameSize) >>> 0;
|
||||
this.sequence = (this.sequence + 1) & 0xFFFF;
|
||||
|
||||
return this._sendAudioFrame(frame);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a packet through the connection's UDP socket. The packet is dropped if the socket isn't established
|
||||
* @arg {Buffer} packet The packet data
|
||||
*/
|
||||
sendUDPPacket(packet) {
|
||||
if(this.udpSocket) {
|
||||
try {
|
||||
this.udpSocket.send(packet, 0, packet.length, this.udpPort, this.udpIP);
|
||||
} catch(e) {
|
||||
this.emit("error", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sendWS(op, data) {
|
||||
if(this.ws && this.ws.readyState === WebSocket.OPEN) {
|
||||
data = JSON.stringify({op: op, d: data});
|
||||
this.ws.send(data);
|
||||
this.emit("debug", data);
|
||||
}
|
||||
}
|
||||
|
||||
setSpeaking(value, delay = 0) {
|
||||
this.speaking = value === true ? 1 : value === false ? 0 : value;
|
||||
this.sendWS(VoiceOPCodes.SPEAKING, {
|
||||
speaking: value,
|
||||
delay: delay,
|
||||
ssrc: this.ssrc
|
||||
});
|
||||
}
|
||||
|
||||
/**
* Modify the output volume of the current stream (if inlineVolume is enabled for the current stream)
* @arg {Number} [volume=1.0] The desired volume. 0.0 is 0%, 1.0 is 100%, 2.0 is 200%, etc. It is not recommended to go above 2.0
*/
setVolume(volume) {
    // Delegates to the encoder pipeline's volume handling
    this.piper.setVolume(volume);
}
|
||||
|
||||
/**
* Stop the bot from sending audio
*/
stopPlaying() {
    // Idempotent: only the first call after playback does anything
    if(this.ended) {
        return;
    }
    this.ended = true;
    // Cancel the pending frame-send tick, if any
    if(this.current && this.current.timeout) {
        clearTimeout(this.current.timeout);
        this.current.timeout = null;
    }
    this.current = null;
    if(this.piper) {
        this.piper.stop();
        this.piper.resetPackets();
    }

    // Send a few frames of silence to cleanly terminate the audio stream
    // (only possible once the encryption secret has been negotiated)
    if(this.secret) {
        for(let i = 0; i < 5; i++) {
            this.sendAudioFrame(SILENCE_FRAME, this.frameSize);
        }
    }
    this.playing = false;
    this.setSpeaking(0);

    /**
    * Fired when the voice connection finishes playing a stream
    * @event VoiceConnection#end
    */
    this.emit("end");
}
|
||||
|
||||
/**
|
||||
* Switch the voice channel the bot is in. The channel to switch to must be in the same guild as the current voice channel
|
||||
* @arg {String} channelID The ID of the voice channel
|
||||
*/
|
||||
switchChannel(channelID, reactive) {
|
||||
if(this.channelID === channelID) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.channelID = channelID;
|
||||
if(reactive) {
|
||||
if(this.reconnecting && !channelID) {
|
||||
this.disconnect();
|
||||
}
|
||||
} else {
|
||||
this.updateVoiceState();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the bot's voice state
|
||||
* @arg {Boolean} selfMute Whether the bot muted itself or not (audio receiving is unaffected)
|
||||
* @arg {Boolean} selfDeaf Whether the bot deafened itself or not (audio sending is unaffected)
|
||||
*/
|
||||
updateVoiceState(selfMute, selfDeaf) {
|
||||
if(this.shard.sendWS) {
|
||||
this.shard.sendWS(GatewayOPCodes.VOICE_STATE_UPDATE, {
|
||||
guild_id: this.id === "call" ? null : this.id,
|
||||
channel_id: this.channelID || null,
|
||||
self_mute: !!selfMute,
|
||||
self_deaf: !!selfDeaf
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
_destroy() {
|
||||
if(this.opus) {
|
||||
for(const key in this.opus) {
|
||||
this.opus[key].delete && this.opus[key].delete();
|
||||
delete this.opus[key];
|
||||
}
|
||||
}
|
||||
delete this.piper;
|
||||
if(this.receiveStreamOpus) {
|
||||
this.receiveStreamOpus.removeAllListeners();
|
||||
this.receiveStreamOpus = null;
|
||||
}
|
||||
if(this.receiveStreamPCM) {
|
||||
this.receiveStreamPCM.removeAllListeners();
|
||||
this.receiveStreamPCM = null;
|
||||
}
|
||||
}
|
||||
|
||||
// Frame scheduler: sends one opus packet per tick and schedules the next tick
// against the stream's ideal wall-clock timeline to avoid drift.
_send() {
    // Encoder finished and buffer drained -> playback is over
    if(!this.piper.encoding && this.piper.dataPacketCount === 0) {
        return this.stopPlaying();
    }

    if((this.current.buffer = this.piper.getDataPacket())) {
        if(this.current.startTime === 0) {
            this.current.startTime = Date.now();
        }
        // Recovered from a buffering stall
        if(this.current.bufferingTicks > 0) {
            this.current.bufferingTicks = 0;
            this.setSpeaking(1);
        }
    } else if(this.current.options.voiceDataTimeout === -1 || this.current.bufferingTicks < this.current.options.voiceDataTimeout / (4 * this.current.options.frameDuration)) { // wait for data
        // First stalled tick: stop advertising speaking
        if(++this.current.bufferingTicks === 1) {
            this.setSpeaking(0);
        }
        // Account for the stall so the scheduling timeline stays consistent
        this.current.pausedTime += 4 * this.current.options.frameDuration;
        this.timestamp = (this.timestamp + 3 * this.current.options.frameSize) >>> 0;
        this.current.timeout = setTimeout(this._send, 4 * this.current.options.frameDuration);
        return;
    } else {
        // Waited longer than voiceDataTimeout for data: give up
        return this.stopPlaying();
    }

    this.sendAudioFrame(this.current.buffer, this.current.options.frameSize);
    this.current.playTime += this.current.options.frameDuration;
    // Next tick at (startTime + pausedTime + playTime) on the wall clock
    this.current.timeout = setTimeout(this._send, this.current.startTime + this.current.pausedTime + this.current.playTime - Date.now());
}
|
||||
|
||||
// Encrypt one opus frame into the preallocated send buffer and transmit it.
// The 24-byte sendNonce doubles as the 12-byte RTP header (zero-padded).
_sendAudioFrame(frame) {
    // Stamp the current sequence/timestamp into the RTP header portion
    this.sendNonce.writeUInt16BE(this.sequence, 2);
    this.sendNonce.writeUInt32BE(this.timestamp, 4);

    if(Sodium) {
        const MACBYTES = Sodium.crypto_secretbox_MACBYTES;
        const length = frame.length + MACBYTES;
        // Buffer layout: [12-byte RTP header][MAC + ciphertext]
        this.sendBuffer.fill(0, 12, 12 + MACBYTES);
        frame.copy(this.sendBuffer, 12 + MACBYTES);
        Sodium.crypto_secretbox_easy(this.sendBuffer.subarray(12, 12 + length), this.sendBuffer.subarray(12 + MACBYTES, 12 + length), this.sendNonce, this.secret);
        this.sendNonce.copy(this.sendBuffer, 0, 0, 12);
        return this.sendUDPPacket(this.sendBuffer.subarray(0, 12 + length));
    } else {
        // tweetnacl low-level API: plaintext must be preceded by ZEROBYTES of
        // zeros; the output has BOXZEROBYTES of leading zeros before the MAC
        const BOXZEROBYTES = NaCl.lowlevel.crypto_secretbox_BOXZEROBYTES;
        const ZEROBYTES = NaCl.lowlevel.crypto_secretbox_ZEROBYTES;
        const length = frame.length + BOXZEROBYTES;
        this.sendBuffer.fill(0, BOXZEROBYTES, BOXZEROBYTES + ZEROBYTES);
        frame.copy(this.sendBuffer, BOXZEROBYTES + ZEROBYTES);
        NaCl.lowlevel.crypto_secretbox(this.sendBuffer, this.sendBuffer.subarray(BOXZEROBYTES), ZEROBYTES + frame.length, this.sendNonce, this.secret);
        // Prepend the RTP header just before the ciphertext
        this.sendNonce.copy(this.sendBuffer, BOXZEROBYTES - 12, 0, 12);
        return this.sendUDPPacket(this.sendBuffer.subarray(BOXZEROBYTES - 12, BOXZEROBYTES + length));
    }
}
|
||||
|
||||
// [DEPRECATED] Backwards-compatible alias; use sendAudioFrame()/_sendAudioFrame()
_sendAudioPacket(audio) {
    return this._sendAudioFrame(audio);
}
|
||||
|
||||
// Delegate console inspection to Base's shared implementation
[util.inspect.custom]() {
    return Base.prototype[util.inspect.custom].call(this);
}
|
||||
|
||||
// e.g. "[VoiceConnection <channelID>]"
toString() {
    return `[VoiceConnection ${this.channelID}]`;
}
|
||||
|
||||
toJSON(props = []) {
|
||||
return Base.prototype.toJSON.call(this, [
|
||||
"channelID",
|
||||
"connecting",
|
||||
"current",
|
||||
"id",
|
||||
"paused",
|
||||
"playing",
|
||||
"ready",
|
||||
"volume",
|
||||
...props
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
// Expose the converter command (defined earlier in this file) on the class
VoiceConnection._converterCommand = converterCommand;

module.exports = VoiceConnection;
|
148
node_modules/eris/lib/voice/VoiceConnectionManager.js
generated
vendored
Normal file
148
node_modules/eris/lib/voice/VoiceConnectionManager.js
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
"use strict";
|
||||
|
||||
const Base = require("../structures/Base");
|
||||
const Collection = require("../util/Collection");
|
||||
|
||||
// Manages a client's VoiceConnections, keyed by guild ID.
class VoiceConnectionManager extends Collection {
    /**
    * @arg {Class} [vcObject] The VoiceConnection class to construct; defaults to ./VoiceConnection
    */
    constructor(vcObject) {
        super(vcObject || require("./VoiceConnection"));
        // Guilds with a join in progress: {channelID, options, res, rej, timeout, waiting}
        this.pendingGuilds = {};
    }

    /**
    * Join a voice channel, resolving once the connection is ready
    * @arg {String} guildID The ID of the guild the channel belongs to
    * @arg {String} channelID The ID of the voice channel
    * @arg {Object} [options] Options forwarded to the VoiceConnection
    * @returns {Promise<VoiceConnection>}
    */
    join(guildID, channelID, options) {
        const connection = this.get(guildID);
        if(connection && connection.ws) {
            // Existing live connection: just switch channels
            connection.switchChannel(channelID);
            if(connection.ready) {
                return Promise.resolve(connection);
            } else {
                // Wait for the connection to become ready (or fail)
                return new Promise((res, rej) => {
                    const disconnectHandler = () => {
                        connection.removeListener("ready", readyHandler);
                        connection.removeListener("error", errorHandler);
                        rej(new Error("Disconnected"));
                    };
                    const readyHandler = () => {
                        connection.removeListener("disconnect", disconnectHandler);
                        connection.removeListener("error", errorHandler);
                        res(connection);
                    };
                    const errorHandler = (err) => {
                        connection.removeListener("disconnect", disconnectHandler);
                        connection.removeListener("ready", readyHandler);
                        connection.disconnect();
                        rej(err);
                    };
                    connection.once("ready", readyHandler).once("disconnect", disconnectHandler).once("error", errorHandler);
                });
            }
        }
        // No live connection yet: stash the request and wait for the gateway's
        // VOICE_SERVER_UPDATE (handled in voiceServerUpdate below)
        return new Promise((res, rej) => {
            this.pendingGuilds[guildID] = {
                channelID: channelID,
                options: options || {},
                res: res,
                rej: rej,
                timeout: setTimeout(() => {
                    delete this.pendingGuilds[guildID];
                    rej(new Error("Voice connection timeout"));
                }, 10000)
            };
        });
    }

    /**
    * Leave the voice channel in a guild and destroy its connection
    * @arg {String} guildID The ID of the guild
    */
    leave(guildID) {
        const connection = this.get(guildID);
        if(!connection) {
            return;
        }
        connection.disconnect();
        connection._destroy();
        this.remove(connection);
    }

    /**
    * Switch the voice channel of an existing connection
    * @arg {String} guildID The ID of the guild
    * @arg {String} channelID The ID of the voice channel to switch to
    */
    switch(guildID, channelID) {
        const connection = this.get(guildID);
        if(!connection) {
            return;
        }
        // Fixed: VoiceConnection exposes switchChannel(), not switch();
        // the previous connection.switch(channelID) call threw a TypeError
        connection.switchChannel(channelID);
    }

    /**
    * Handle a VOICE_SERVER_UPDATE payload: create or reconnect the guild's
    * VoiceConnection and settle any pending join() promise
    * @arg {Object} data The gateway payload plus shard/user info
    */
    voiceServerUpdate(data) {
        if(this.pendingGuilds[data.guild_id] && this.pendingGuilds[data.guild_id].timeout) {
            clearTimeout(this.pendingGuilds[data.guild_id].timeout);
            this.pendingGuilds[data.guild_id].timeout = null;
        }
        let connection = this.get(data.guild_id);
        if(!connection) {
            // Unsolicited update for a guild nobody asked to join: ignore
            if(!this.pendingGuilds[data.guild_id]) {
                return;
            }
            connection = this.add(new this.baseObject(data.guild_id, {
                shard: data.shard,
                opusOnly: this.pendingGuilds[data.guild_id].options.opusOnly,
                shared: this.pendingGuilds[data.guild_id].options.shared
            }));
        }
        connection.connect({
            channel_id: (this.pendingGuilds[data.guild_id] || connection).channelID,
            endpoint: data.endpoint,
            token: data.token,
            session_id: data.session_id,
            user_id: data.user_id
        });
        // Only attach the settle handlers once per pending join
        if(!this.pendingGuilds[data.guild_id] || this.pendingGuilds[data.guild_id].waiting) {
            return;
        }
        this.pendingGuilds[data.guild_id].waiting = true;
        const disconnectHandler = () => {
            connection = this.get(data.guild_id);
            if(connection) {
                connection.removeListener("ready", readyHandler);
                connection.removeListener("error", errorHandler);
            }
            if(this.pendingGuilds[data.guild_id]) {
                this.pendingGuilds[data.guild_id].rej(new Error("Disconnected"));
                delete this.pendingGuilds[data.guild_id];
            }
        };
        const readyHandler = () => {
            connection = this.get(data.guild_id);
            if(connection) {
                connection.removeListener("disconnect", disconnectHandler);
                connection.removeListener("error", errorHandler);
            }
            if(this.pendingGuilds[data.guild_id]) {
                this.pendingGuilds[data.guild_id].res(connection);
                delete this.pendingGuilds[data.guild_id];
            }
        };
        const errorHandler = (err) => {
            connection = this.get(data.guild_id);
            if(connection) {
                connection.removeListener("disconnect", disconnectHandler);
                connection.removeListener("ready", readyHandler);
                connection.disconnect();
            }
            if(this.pendingGuilds[data.guild_id]) {
                this.pendingGuilds[data.guild_id].rej(err);
                delete this.pendingGuilds[data.guild_id];
            }
        };
        connection.once("ready", readyHandler).once("disconnect", disconnectHandler).once("error", errorHandler);
    }

    toString() {
        return "[VoiceConnectionManager]";
    }

    toJSON(props = []) {
        return Base.prototype.toJSON.call(this, [
            "pendingGuilds",
            ...props
        ]);
    }
}
|
||||
|
||||
module.exports = VoiceConnectionManager;
|
22
node_modules/eris/lib/voice/VoiceDataStream.js
generated
vendored
Normal file
22
node_modules/eris/lib/voice/VoiceDataStream.js
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
"use strict";
|
||||
|
||||
let EventEmitter;
|
||||
try {
|
||||
EventEmitter = require("eventemitter3");
|
||||
} catch(err) {
|
||||
EventEmitter = require("events").EventEmitter;
|
||||
}
|
||||
|
||||
/**
* Represents a voice data stream
* @extends EventEmitter
* @prop {String} type The targeted voice data type for the stream, either "opus" or "pcm"
*/
class VoiceDataStream extends EventEmitter {
    /**
    * @arg {String} type The voice data type, either "opus" or "pcm"
    */
    constructor(type) {
        super();
        this.type = type;
    }
}
|
||||
|
||||
module.exports = VoiceDataStream;
|
40
node_modules/eris/lib/voice/streams/BaseTransformer.js
generated
vendored
Normal file
40
node_modules/eris/lib/voice/streams/BaseTransformer.js
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
"use strict";
|
||||
|
||||
const util = require("util");
|
||||
const Base = require("../../structures/Base");
|
||||
const TransformStream = require("stream").Transform;
|
||||
|
||||
// Common base for the voice transform streams: half-open, zero highWaterMark,
// with optional manual deferral of the _transform callback (manualCB).
class BaseTransformer extends TransformStream {
    constructor(options = {}) {
        if(options.allowHalfOpen === undefined) {
            options.allowHalfOpen = true;
        }
        if(options.highWaterMark === undefined) {
            options.highWaterMark = 0;
        }
        super(options);
        // When true, subclasses hold onto the _transform callback via
        // setTransformCB and release it later through transformCB
        this.manualCB = false;
    }

    // In manual mode: flush any previously deferred callback, then store cb
    // for a later transformCB() call. Otherwise invoke cb immediately.
    setTransformCB(cb) {
        if(this.manualCB) {
            this.transformCB();
            this._transformCB = cb;
        } else {
            cb();
        }
    }

    // Fire and clear the deferred _transform callback, if one is pending
    transformCB() {
        if(this._transformCB) {
            this._transformCB();
            this._transformCB = null;
        }
    }

    [util.inspect.custom]() {
        return Base.prototype[util.inspect.custom].call(this);
    }
}
|
||||
|
||||
module.exports = BaseTransformer;
|
78
node_modules/eris/lib/voice/streams/DCAOpusTransformer.js
generated
vendored
Normal file
78
node_modules/eris/lib/voice/streams/DCAOpusTransformer.js
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
"use strict";
|
||||
|
||||
const BaseTransformer = require("./BaseTransformer");
|
||||
|
||||
// Demuxes a DCA (DCA0/DCA1) byte stream into individual opus packets.
class DCAOpusTransformer extends BaseTransformer {
    constructor(options = {}) {
        super(options);

        // Bytes carried over between _transform calls (incomplete packet)
        this._remainder = null;
    }

    // Try to read one length-prefixed opus packet starting at buffer._index.
    // Returns true when the buffer doesn't yet hold a complete packet.
    process(buffer) {
        if(buffer.length - buffer._index < 2) {
            return true;
        }

        // Each DCA packet is prefixed with an int16le length
        const opusLen = buffer.readInt16LE(buffer._index);
        buffer._index += 2;

        if(buffer.length - buffer._index < opusLen) {
            return true;
        }

        buffer._index += opusLen;
        this.push(buffer.subarray(buffer._index - opusLen, buffer._index));
    }

    _transform(chunk, enc, cb) {
        // Prepend any leftover bytes from the previous chunk
        if(this._remainder) {
            chunk = Buffer.concat([this._remainder, chunk]);
            this._remainder = null;
        }

        // Parse the DCA magic/metadata header once, at stream start
        if(!this.head) {
            if(chunk.length < 4) {
                this._remainder = chunk;
                return cb();
            } else {
                const dcaVersion = chunk.subarray(0, 4);
                // 68/67/65 === "DCA"
                if(dcaVersion[0] !== 68 || dcaVersion[1] !== 67 || dcaVersion[2] !== 65) { // DCA0 or invalid
                    this.head = true; // Attempt to play as if it were a DCA0 file
                } else if(dcaVersion[3] === 49) { // DCA1
                    if(chunk.length < 8) {
                        this._remainder = chunk;
                        return cb();
                    }
                    // DCA1 header: "DCA1" + int32le JSON length + JSON metadata
                    const jsonLength = chunk.subarray(4, 8).readInt32LE(0);
                    if(chunk.length < 8 + jsonLength) {
                        this._remainder = chunk;
                        return cb();
                    }
                    const jsonMetadata = chunk.subarray(8, 8 + jsonLength);
                    this.emit("debug", jsonMetadata);
                    chunk = chunk.subarray(8 + jsonLength);
                    this.head = true;
                } else {
                    this.emit("error", new Error("Unsupported DCA version: " + dcaVersion.toString()));
                }
            }
        }

        chunk._index = 0;

        // Emit as many complete packets as the chunk holds; stash the rest
        while(chunk._index < chunk.length) {
            const offset = chunk._index;
            const ret = this.process(chunk);
            if(ret) {
                this._remainder = chunk.subarray(offset);
                cb();
                return;
            }
        }

        this.setTransformCB(cb);
    }
}
|
||||
|
||||
module.exports = DCAOpusTransformer;
|
199
node_modules/eris/lib/voice/streams/FFmpegDuplex.js
generated
vendored
Normal file
199
node_modules/eris/lib/voice/streams/FFmpegDuplex.js
generated
vendored
Normal file
@ -0,0 +1,199 @@
|
||||
"use strict";
|
||||
const util = require("util");
|
||||
const Base = require("../../structures/Base");
|
||||
const ChildProcess = require("child_process");
|
||||
const DuplexStream = require("stream").Duplex;
|
||||
const PassThroughStream = require("stream").PassThrough;
|
||||
|
||||
// Events that must be observed on the internal reader/writer pass-through
// streams rather than on the FFmpegDuplex wrapper itself
const delegateEvents = {
    readable: "_reader",
    data: "_reader",
    end: "_reader",
    drain: "_writer",
    finish: "_writer"
};
|
||||
|
||||
// Duplex stream wrapped around an ffmpeg/avconv child process: writes are
// piped to the process's stdin, reads come from its stdout.
class FFmpegDuplex extends DuplexStream {
    constructor(command, options = {}) {
        if(options.highWaterMark === undefined) {
            options.highWaterMark = 0;
        }
        super(options);

        this.command = command;
        // Internal pass-throughs backing the readable/writable sides
        this._reader = new PassThroughStream(options);
        this._writer = new PassThroughStream(options);

        this._onError = this.emit.bind(this, "error");

        this._reader.on("error", this._onError);
        this._writer.on("error", this._onError);

        // Share stream state with the pass-throughs so backpressure and
        // readable/writable bookkeeping work through the wrapper
        this._readableState = this._reader._readableState;
        this._writableState = this._writer._writableState;

        // Route listener registration for delegated events (see delegateEvents)
        // to the appropriate substream
        ["on", "once", "removeListener", "removeListeners", "listeners"].forEach((method) => {
            const og = DuplexStream.prototype[method];

            this[method] = function(ev, fn) {
                const substream = delegateEvents[ev];
                if(substream) {
                    return this[substream][method](ev, fn);
                } else {
                    return og.call(this, ev, fn);
                }
            };
        });
    }

    // No-op until spawn() replaces it with the real kill
    destroy() {
    }

    end(chunk, enc, cb) {
        return this._writer.end(chunk, enc, cb);
    }

    // No-op until spawn() replaces it with the real kill
    kill() {
    }

    noop() {
    }

    pipe(dest, opts) {
        return this._reader.pipe(dest, opts);
    }

    read(size) {
        return this._reader.read(size);
    }

    setEncoding(enc) {
        return this._reader.setEncoding(enc);
    }

    // Start the child process and wire its stdio to this duplex
    spawn(args, options = {}) {
        let ex, exited, killed, ended;
        let stderr = [];

        // End the readable side once the process has exited and stdout drained
        const onStdoutEnd = () => {
            if(exited && !ended) {
                ended = true;
                this._reader.end();
                setImmediate(this.emit.bind(this, "close"));
            }
        };

        // Collect stderr so a failure can report the process's own output
        const onStderrData = (chunk) => {
            stderr.push(chunk);
        };

        // Drop references and neuter kill/destroy after the process is gone
        const cleanup = () => {
            this._process =
            this._stderr =
            this._stdout =
            this._stdin =
            stderr =
            ex =
            killed = null;

            this.kill =
            this.destroy = this.noop;
        };

        const onExit = (code, signal) => {
            if(exited) {
                return;
            }
            exited = true;

            if(killed) {
                if(ex) {
                    this.emit("error", ex);
                }
                this.emit("close");
            } else if(code === 0 && signal == null) {
                // All is well
                onStdoutEnd();
            } else {
                // Everything else
                ex = new Error("Command failed: " + Buffer.concat(stderr).toString("utf8"));
                ex.killed = this._process.killed || killed;
                ex.code = code;
                ex.signal = signal;
                this.emit("error", ex);
                this.emit("close");
            }

            cleanup();
        };

        const onError = (err) => {
            ex = err;
            this._stdout.destroy();
            this._stderr.destroy();
            onExit();
        };

        const kill = () => {
            if(killed) {
                return;
            }
            this._stdout.destroy();
            this._stderr.destroy();

            killed = true;

            try {
                this._process.kill(options.killSignal || "SIGTERM");
                // Escalate to SIGKILL if the process lingers
                setTimeout(() => this._process && this._process.kill("SIGKILL"), 2000);
            } catch(e) {
                ex = e;
                onExit();
            }
        };

        this._process = ChildProcess.spawn(this.command, args, options);
        this._stdin = this._process.stdin;
        this._stdout = this._process.stdout;
        this._stderr = this._process.stderr;
        this._writer.pipe(this._stdin);
        // end: false so onStdoutEnd controls when the reader actually ends
        this._stdout.pipe(this._reader, {
            end: false
        });
        this.kill = this.destroy = kill;

        this._stderr.on("data", onStderrData);

        // In some cases ECONNRESET can be emitted by stdin because the process is not interested in any
        // more data but the _writer is still piping. Forget about errors emitted on stdin and stdout
        this._stdin.on("error", this.noop);
        this._stdout.on("error", this.noop);

        this._stdout.on("end", onStdoutEnd);

        this._process.once("close", onExit);
        this._process.once("error", onError);

        return this;
    }

    // Once nothing is consuming the output, the process can be killed
    unpipe(dest) {
        return this._reader.unpipe(dest) || this.kill();
    }

    write(chunk, enc, cb) {
        return this._writer.write(chunk, enc, cb);
    }

    [util.inspect.custom]() {
        return Base.prototype[util.inspect.custom].call(this);
    }
}
|
||||
|
||||
// addListener behaves identically to on (which delegates per delegateEvents)
FFmpegDuplex.prototype.addListener = FFmpegDuplex.prototype.on;

// Convenience factory: construct a duplex for `command` and spawn it at once
FFmpegDuplex.spawn = function(command, args, options) {
    return new FFmpegDuplex(command, options).spawn(args, options);
};

module.exports = FFmpegDuplex;
|
35
node_modules/eris/lib/voice/streams/FFmpegOggTransformer.js
generated
vendored
Normal file
35
node_modules/eris/lib/voice/streams/FFmpegOggTransformer.js
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
"use strict";
|
||||
|
||||
const FFmpegDuplex = require("./FFmpegDuplex");
|
||||
|
||||
module.exports = function(options = {}) {
|
||||
if(!options.command) {
|
||||
throw new Error("Invalid converter command");
|
||||
}
|
||||
if(options.frameDuration === undefined) {
|
||||
options.frameDuration = 60;
|
||||
}
|
||||
let inputArgs = [
|
||||
"-analyzeduration", "0",
|
||||
"-loglevel", "24"
|
||||
].concat(options.inputArgs || []);
|
||||
if(options.format === "pcm") {
|
||||
inputArgs = inputArgs.concat(
|
||||
"-f", "s16le",
|
||||
"-ar", "48000",
|
||||
"-ac", "2"
|
||||
);
|
||||
}
|
||||
inputArgs = inputArgs.concat(
|
||||
"-i", options.input || "-",
|
||||
"-vn"
|
||||
);
|
||||
const outputArgs = [
|
||||
"-c:a", "libopus",
|
||||
"-vbr", "on",
|
||||
"-frame_duration", "" + options.frameDuration,
|
||||
"-f", "ogg",
|
||||
"-"
|
||||
];
|
||||
return FFmpegDuplex.spawn(options.command, inputArgs.concat(options.encoderArgs || [], outputArgs));
|
||||
};
|
26
node_modules/eris/lib/voice/streams/FFmpegPCMTransformer.js
generated
vendored
Normal file
26
node_modules/eris/lib/voice/streams/FFmpegPCMTransformer.js
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
"use strict";
|
||||
|
||||
const FFmpegDuplex = require("./FFmpegDuplex");
|
||||
|
||||
module.exports = function(options = {}) {
|
||||
if(!options.command) {
|
||||
throw new Error("Invalid converter command");
|
||||
}
|
||||
if(options.samplingRate === undefined) {
|
||||
options.samplingRate = 48000;
|
||||
}
|
||||
const inputArgs = [
|
||||
"-analyzeduration", "0",
|
||||
"-loglevel", "24"
|
||||
].concat(options.inputArgs || [],
|
||||
"-i", options.input || "-",
|
||||
"-vn"
|
||||
);
|
||||
const outputArgs = [
|
||||
"-f", "s16le",
|
||||
"-ar", "" + options.samplingRate,
|
||||
"-ac", "2",
|
||||
"-"
|
||||
];
|
||||
return FFmpegDuplex.spawn(options.command, inputArgs.concat(options.encoderArgs || [], outputArgs));
|
||||
};
|
107
node_modules/eris/lib/voice/streams/OggOpusTransformer.js
generated
vendored
Normal file
107
node_modules/eris/lib/voice/streams/OggOpusTransformer.js
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
"use strict";
|
||||
|
||||
const BaseTransformer = require("./BaseTransformer");
|
||||
|
||||
/**
 * Transform stream that extracts raw Opus packets from an Ogg container.
 * Input: Ogg byte stream. Output: one push per Opus packet (segment).
 */
class OggOpusTransformer extends BaseTransformer {
    constructor(options = {}) {
        super(options);

        this._remainder = null; // bytes of an incomplete Ogg page, kept for the next chunk
        this._bitstream = null; // serial number of the Opus logical bitstream, once found
    }

    /**
     * Attempts to parse one Ogg page starting at buffer._index, advancing the
     * cursor past it. Returns true when more data is needed (caller buffers
     * the rest), an Error on unrecoverable framing problems, or undefined on
     * success.
     */
    process(buffer) {
        // 26 bytes covers the fixed page header up to (not including) the segment table
        if(buffer.length - buffer._index <= 26) {
            return true;
        }

        // Every Ogg page begins with the capture pattern "OggS"
        if(buffer.toString("utf8", buffer._index, buffer._index + 4) !== "OggS") {
            return new Error("Invalid OGG magic string: " + buffer.toString("utf8", buffer._index, buffer._index + 4));
        }

        // Header type flag at offset 5: value 1 marks a continued packet page
        const typeFlag = buffer.readUInt8(buffer._index + 5);
        if(typeFlag === 1) {
            return new Error("OGG continued page not supported");
        }

        // Bitstream serial number at offset 14 identifies the logical stream.
        // NOTE(review): read big-endian here although Ogg fields are
        // little-endian — harmless since the value is only compared with
        // itself, but worth confirming against the spec.
        const bitstream = buffer.readUInt32BE(buffer._index + 14);

        buffer._index += 26;

        // Segment table: one lacing byte per segment follows the count byte
        const segmentCount = buffer.readUInt8(buffer._index);
        if(buffer.length - buffer._index - 1 < segmentCount) {
            return true;
        }

        const segments = [];
        let size = 0;
        let byte = 0;
        let total = 0;
        let i = 0;
        for(; i < segmentCount; i++) {
            byte = buffer.readUInt8(++buffer._index);
            if(byte < 255) {
                // A lacing value < 255 terminates the current packet
                segments.push(size + byte);
                size = 0;
            } else {
                // 255 means the packet continues into the next lacing value
                size += byte;
            }
            total += byte;
        }

        ++buffer._index;

        // Whole segment payload must be available before emitting anything
        if(buffer.length - buffer._index < total) {
            return true;
        }

        for(let segment of segments) {
            buffer._index += segment;
            // Rebind `segment` to the actual payload bytes; `byte` holds its 8-char prefix
            byte = (segment = buffer.subarray(buffer._index - segment, buffer._index)).toString("utf8", 0, 8);
            if(this.head) {
                if(byte === "OpusTags") {
                    // Metadata page: surface it for debugging, don't push downstream
                    this.emit("debug", segment.toString());
                } else if(bitstream === this._bitstream) {
                    // Audio packet from the identified Opus stream
                    this.push(segment);
                }
            } else if(byte === "OpusHead") {
                // First identification header: lock onto this bitstream
                this._bitstream = bitstream;
                this.emit("debug", (this.head = segment.toString()));
            } else {
                this.emit("debug", "Invalid codec: " + byte);
            }
        }
    }

    // Stream is ending: if no OpusHead was ever seen, the input wasn't Ogg/Opus
    _final() {
        if(!this._bitstream) {
            this.emit("error", new Error("No Opus stream was found"));
        }
    }

    _transform(chunk, enc, cb) {
        // Prepend any partial page left over from the previous chunk
        if(this._remainder) {
            chunk = Buffer.concat([this._remainder, chunk]);
            this._remainder = null;
        }

        chunk._index = 0; // parse cursor shared with process()

        while(chunk._index < chunk.length) {
            const offset = chunk._index;
            const ret = this.process(chunk);
            if(ret) {
                // Truthy result: either "need more data" (true) or an Error;
                // in both cases keep the unparsed tail for later
                this._remainder = chunk.subarray(offset);
                if(ret instanceof Error) {
                    this.emit("error", ret);
                }
                cb();
                return;
            }
        }

        this.setTransformCB(cb);
    }
}

module.exports = OggOpusTransformer;
|
61
node_modules/eris/lib/voice/streams/PCMOpusTransformer.js
generated
vendored
Normal file
61
node_modules/eris/lib/voice/streams/PCMOpusTransformer.js
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
"use strict";
|
||||
|
||||
const BaseTransformer = require("./BaseTransformer");
|
||||
|
||||
/**
 * Transform stream that encodes raw PCM input into Opus packets, one push
 * per encoded frame of `pcmSize` bytes.
 */
class PCMOpusTransformer extends BaseTransformer {
    constructor(options = {}) {
        super(options);

        this.opus = options.opusFactory(); // per-stream Opus encoder instance
        this.frameSize = options.frameSize || 2880; // samples per channel per Opus frame
        this.pcmSize = options.pcmSize || 11520; // bytes of PCM consumed per frame

        this._remainder = null; // PCM bytes left over from the previous chunk
    }

    _destroy(...args) {
        // Some opus bindings expose delete() to free native/WASM memory —
        // release it before tearing the stream down
        if(this.opus.delete) {
            this.opus.delete();
        }

        return super._destroy(...args);
    }

    // Encode the final partial frame, zero-padded up to a full pcmSize buffer
    _flush(cb) {
        if(this._remainder) {
            const buf = Buffer.allocUnsafe(this.pcmSize);
            this._remainder.copy(buf);
            buf.fill(0, this._remainder.length);
            this.push(this.opus.encode(buf, this.frameSize));
            this._remainder = null;
        }
        cb();
    }

    _transform(chunk, enc, cb) {
        // Prepend leftover PCM from the previous chunk
        if(this._remainder) {
            chunk = Buffer.concat([this._remainder, chunk]);
            this._remainder = null;
        }

        // Not enough for a single frame yet: buffer it all
        if(chunk.length < this.pcmSize) {
            this._remainder = chunk;
            return cb();
        }

        chunk._index = 0;

        // NOTE(review): the strict `<` means a chunk that is an exact multiple
        // of pcmSize keeps its last frame buffered until more data (or _flush)
        // arrives — presumably intentional; confirm before changing.
        while(chunk._index + this.pcmSize < chunk.length) {
            chunk._index += this.pcmSize;
            this.push(this.opus.encode(chunk.subarray(chunk._index - this.pcmSize, chunk._index), this.frameSize));
        }

        // Carry any trailing partial frame to the next chunk
        if(chunk._index < chunk.length) {
            this._remainder = chunk.subarray(chunk._index);
        }

        this.setTransformCB(cb);
    }
}

module.exports = PCMOpusTransformer;
|
50
node_modules/eris/lib/voice/streams/VolumeTransformer.js
generated
vendored
Normal file
50
node_modules/eris/lib/voice/streams/VolumeTransformer.js
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
"use strict";
|
||||
|
||||
const BaseTransformer = require("./BaseTransformer");
|
||||
|
||||
/**
 * Transform stream that scales signed 16-bit little-endian PCM samples by a
 * configurable volume level, clamping to the 16-bit range.
 */
class VolumeTransformer extends BaseTransformer {
    constructor(options = {}) {
        super(options);

        this._remainder = null; // stray odd byte carried over to the next chunk
        this.setVolume(1.0);
    }

    /**
     * Sets the amplification level applied to each sample.
     * @arg {Number} volume Linear volume level; must be a number >= 0
     * @throws {Error} When the value is NaN or negative
     */
    setVolume(volume) {
        if(isNaN(volume) || (volume = +volume) < 0) {
            throw new Error("Invalid volume level: " + volume);
        }
        this.volume = volume;
        // Perceptual scaling: equals log2(1 + volume), since
        // 6.9314... = 10 * ln(2)
        this.db = 10 * Math.log(1 + this.volume) / 6.931471805599453;
    }

    _transform(chunk, enc, cb) {
        // Prepend the odd byte held back from the previous chunk
        if(this._remainder) {
            chunk = Buffer.concat([this._remainder, chunk]);
            this._remainder = null;
        }

        if(chunk.length < 2) {
            // FIX: previously a lone byte was silently dropped here, which
            // permanently shifted the s16le sample alignment of the stream.
            // Retain it as remainder instead.
            if(chunk.length === 1) {
                this._remainder = chunk;
            }
            return cb();
        }

        let buf;
        if(chunk.length & 1) {
            // Odd length: hold the trailing byte until more data arrives
            this._remainder = chunk.subarray(chunk.length - 1);
            buf = Buffer.allocUnsafe(chunk.length - 1);
        } else {
            buf = Buffer.allocUnsafe(chunk.length);
        }

        for(let i = 0, num; i < buf.length - 1; i += 2) {
            // Scale each sample and clamp the transformed value to 16 bits
            num = ~~(this.db * chunk.readInt16LE(i));
            buf.writeInt16LE(num >= 32767 ? 32767 : num <= -32767 ? -32767 : num, i);
        }

        this.push(buf);
        this.setTransformCB(cb);
    }
}

module.exports = VolumeTransformer;
|
258
node_modules/eris/lib/voice/streams/WebmOpusTransformer.js
generated
vendored
Normal file
258
node_modules/eris/lib/voice/streams/WebmOpusTransformer.js
generated
vendored
Normal file
@ -0,0 +1,258 @@
|
||||
"use strict";
|
||||
|
||||
const BaseTransformer = require("./BaseTransformer");
|
||||
|
||||
// EBML VInt max value is (2 ^ 56 - 2), but JS only supports 2^53
// 45 = 53 - 8 - check before last 8 bytes
const MAX_SHIFTED_VINT = Math.pow(2, 45);

// Parser states: reading an element's content vs. reading the next tag header
const STATE_CONTENT = 0;
const STATE_TAG = 1;

// Event kinds passed to process(): master element closed / opened, leaf element read
const TAG_TYPE_END = 0;
const TAG_TYPE_START = 1;
const TAG_TYPE_TAG = 2;

const TRACKTYPE_AUDIO = 2; // EBML spec: https://www.matroska.org/technical/specs/index.html#TrackType
|
||||
|
||||
/**
 * Transform stream that extracts raw Opus packets from a WebM (Matroska/EBML)
 * container: finds the first audio track, then pushes each of its
 * SimpleBlock payloads downstream.
 */
class WebmOpusTransformer extends BaseTransformer {
    constructor(options = {}) {
        super(options);

        this._tag_stack = []; // stack of currently-open EBML elements
        this._state = STATE_TAG; // parser begins by expecting a tag header
        this._total = 0; // total bytes consumed so far, used to detect element ends
    }

    /**
     * Returns the byte length (1-8) of the EBML variable-length integer
     * starting at buffer[index], or null when the length marker is invalid
     * or the buffer does not yet contain the whole VInt.
     */
    getVIntLength(buffer, index) {
        let length = 1;
        // The position of the first set bit in the leading byte encodes the width
        for(; length <= 8; ++length) {
            if(buffer[index] & (1 << (8 - length))) {
                break;
            }
        }
        if(length > 8) {
            this.emit("debug", new Error(`VInt length ${length} | ${buffer.toString("hex", index, index + length)}`));
            return null;
        }
        if(index + length > buffer.length) {
            return null; // need more data
        }
        return length;
    }

    /**
     * Handles one parsed element. `type` is a TAG_TYPE_* constant; `info` is
     * the tag object (with .data populated for leaf elements).
     */
    process(type, info) {
        if(type === TAG_TYPE_TAG) {
            // SimpleBlock: the low nibble of the first byte holds the track
            // number; the 4-byte block header precedes the raw Opus packet
            if(info.name === "SimpleBlock" && (info.data.readUInt8(0) & 0xF) === this.firstAudioTrack.TrackNumber) {
                this.push(info.data.subarray(4));
                return;
            }
            if(info.name === "CodecPrivate") {
                const head = info.data.toString("utf8", 0, 8);
                if(head !== "OpusHead") {
                    this.emit("error", new Error("Invalid codec: " + head));
                    return;
                }

                // Fields of the OpusHead identification structure
                this.codecData = {
                    version: info.data.readUInt8(8),
                    channelCount: info.data.readUInt8(9),
                    preSkip: info.data.readUInt16LE(10),
                    inputSampleRate: info.data.readUInt32LE(12),
                    outputGain: info.data.readUInt16LE(16),
                    mappingFamily: info.data.readUInt8(18)
                };
                return;
            }
        }

        // Until an audio track is located, scan TrackEntry elements
        if(!this.firstAudioTrack) {
            if(info.name === "TrackEntry") {
                if(type === TAG_TYPE_START) {
                    this.parsingTrack = {};
                } else if(type === TAG_TYPE_END) {
                    if(this.parsingTrack.TrackNumber && this.parsingTrack.TrackType === TRACKTYPE_AUDIO) {
                        this.firstAudioTrack = this.parsingTrack;
                    }
                    delete this.parsingTrack;
                }
                return;
            }
            if(this.parsingTrack) {
                if(info.name === "TrackNumber") {
                    this.parsingTrack.TrackNumber = info.data[0];
                    return;
                }
                if(info.name === "TrackType") {
                    this.parsingTrack.TrackType = info.data[0];
                    return;
                }
            }
            // Tracks element closed without any audio track entry
            if(type === TAG_TYPE_END && info.name === "Tracks") {
                this.emit("error", new Error("No audio track"));
                return;
            }
            return;
        }
    }

    /**
     * Consumes the content of the element on top of the tag stack.
     * Returns false when the buffer does not yet hold the whole content.
     */
    readContent(buffer) {
        const tagObj = this._tag_stack[this._tag_stack.length - 1];

        // Master ("m") elements have no direct content; descend into children
        if(tagObj.type === "m") {
            this.process(TAG_TYPE_START, tagObj);
            this._state = STATE_TAG;
            return true;
        }

        if(buffer.length < buffer._index + tagObj.size) {
            return false;
        }

        tagObj.data = buffer.subarray(buffer._index, buffer._index + tagObj.size);
        buffer._index += tagObj.size;
        this._total += tagObj.size;
        this._state = STATE_TAG;

        this._tag_stack.pop();

        this.process(TAG_TYPE_TAG, tagObj);

        // Close every enclosing master element whose declared end was reached
        while(this._tag_stack.length > 0) {
            if(this._total < this._tag_stack[this._tag_stack.length - 1].end) {
                break;
            }
            this.process(TAG_TYPE_END, this._tag_stack.pop());
        }

        return true;
    }

    /**
     * Reads one EBML tag (ID VInt + size VInt) and pushes a tag object onto
     * the stack. Returns false when more data is required.
     */
    readTag(buffer) {
        const tagSize = this.getVIntLength(buffer, buffer._index);
        if(tagSize === null) {
            return false;
        }

        const size = this.getVIntLength(buffer, buffer._index + tagSize);
        if(size === null) {
            return false;
        }

        // Element IDs are looked up by their hex representation in `schema`
        const tagStr = buffer.toString("hex", buffer._index, buffer._index + tagSize);

        const tagObj = {
            type: "unknown",
            name: "unknown",
            end: this._total + tagSize
        };
        if(schema[tagStr]) {
            tagObj.type = schema[tagStr].type;
            tagObj.name = schema[tagStr].name;
        }

        buffer._index += tagSize;

        // Decode the size VInt: mask off the length-marker bit of the first
        // byte, then accumulate the remaining bytes big-endian
        let value = buffer[buffer._index] & (1 << (8 - size)) - 1;
        for(let i = 1; i < size; ++i) {
            if(i === 7 && value >= MAX_SHIFTED_VINT && buffer[buffer._index + 7] > 0) {
                tagObj.end = -1; // Special livestreaming int 0x1FFFFFFFFFFFFFF
                break;
            }
            value = (value << 8) + buffer[buffer._index + i];
        }
        if(tagObj.end !== -1) {
            tagObj.end += value + size;
        }
        tagObj.size = value;

        buffer._index += size;
        this._total += tagSize + size;
        this._state = STATE_CONTENT;

        this._tag_stack.push(tagObj);

        return true;
    }

    _transform(chunk, enc, cb) {
        // Prepend bytes held over from the previous chunk
        if(this._remainder) {
            chunk = Buffer.concat([this._remainder, chunk]);
            this._remainder = null;
        }

        chunk._index = 0; // parse cursor shared with readTag/readContent

        // Alternate between reading tag headers and element content until
        // the buffer runs dry mid-element
        while(chunk._index < chunk.length) {
            if(this._state === STATE_TAG && !this.readTag(chunk)) {
                break;
            }
            if(this._state === STATE_CONTENT && !this.readContent(chunk)) {
                break;
            }
        }

        if(chunk._index < chunk.length) {
            this._remainder = chunk.subarray(chunk._index);
        }

        this.setTransformCB(cb);
    }
}

module.exports = WebmOpusTransformer;
|
||||
|
||||
// Minimal EBML schema: only the element IDs (hex-string keys) this
// transformer cares about. type "m" = master (container element),
// "u" = unsigned integer, "s" = string, "b" = binary.
const schema = {
    ae: {
        name: "TrackEntry",
        type: "m"
    },
    d7: {
        name: "TrackNumber",
        type: "u"
    },
    "86": {
        name: "CodecID",
        type: "s"
    },
    "83": {
        name: "TrackType",
        type: "u"
    },
    "1654ae6b": {
        name: "Tracks",
        type: "m"
    },
    "63a2": {
        name: "CodecPrivate",
        type: "b"
    },
    a3: {
        name: "SimpleBlock",
        type: "b"
    },
    "1a45dfa3": {
        name: "EBML",
        type: "m"
    },
    "18538067": {
        name: "Segment",
        type: "m"
    },
    "114d9b74": {
        name: "SeekHead",
        type: "m"
    },
    "1549a966": {
        name: "Info",
        type: "m"
    },
    e1: {
        name: "Audio",
        type: "m"
    },
    "1f43b675": {
        name: "Cluster",
        type: "m"
    }
};
|
Reference in New Issue
Block a user