Turn audio into a shareable video. Forked from nypublicradio/audiogram.

index.js

var path = require("path"),
    queue = require("d3").queue,
    mkdirp = require("mkdirp"),
    rimraf = require("rimraf"),
    serverSettings = require("../lib/settings/"),
    transports = require("../lib/transports/"),
    logger = require("../lib/logger/"),
    Profiler = require("../lib/profiler.js"),
    probe = require("../lib/probe.js"),
    getWaveform = require("./waveform.js"),
    initializeCanvas = require("./initialize-canvas.js"),
    drawFrames = require("./draw-frames.js"),
    combineFrames = require("./combine-frames.js"),
    trimAudio = require("./trim.js");

function Audiogram(id) {

  // Unique audiogram ID
  this.id = id;

  // File locations to use
  this.dir = path.join(serverSettings.workingDirectory, this.id);
  this.audioPath = path.join(this.dir, "audio");
  this.videoPath = path.join(this.dir, "video.mp4");
  this.frameDir = path.join(this.dir, "frames");
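  // All of the above live under workingDirectory/<id>; render() deletes the
  // whole directory (rimraf) once the finished video has been uploaded.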

  this.profiler = new Profiler();

  return this;

}

// Get the waveform data from the audio file, split into frames
Audiogram.prototype.getWaveform = function(cb) {

  var self = this;

  this.status("probing");

  probe(this.audioPath, function(err, data){

    if (err) {
      return cb(err);
    }

    if (self.settings.theme.maxDuration && self.settings.theme.maxDuration < data.duration) {
      return cb("Exceeds max duration of " + self.settings.theme.maxDuration + "s");
    }

    self.profiler.size(data.duration);
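
    // Total frames to draw = audio duration (s) × theme frame rate, floored;
    // stored on the job via the transport, presumably so progress can be
    // reported against the framesComplete counter incremented in drawFrames.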
    self.set("numFrames", self.numFrames = Math.floor(data.duration * self.settings.theme.framesPerSecond));

    self.status("waveform");

    getWaveform(self.audioPath, {
      numFrames: self.numFrames,
      samplesPerFrame: self.settings.theme.samplesPerFrame,
      channels: data.channels
    }, function(waveformErr, waveform){
      return cb(waveformErr, self.waveform = waveform);
    });

  });

};

// Trim the audio by the start and end time specified
Audiogram.prototype.trimAudio = function(start, end, cb) {

  var self = this;

  this.status("trim");

  // FFmpeg needs an extension to sniff
  var trimmedPath = this.audioPath + "-trimmed.mp3";

  trimAudio({
    origin: this.audioPath,
    destination: trimmedPath,
    startTime: start,
    endTime: end
  }, function(err){
    if (err) {
      return cb(err);
    }
    self.audioPath = trimmedPath;
    return cb(null);
  });

};

// Initialize the canvas and draw all the frames
Audiogram.prototype.drawFrames = function(cb) {

  var self = this;

  this.status("renderer");

  initializeCanvas(this.settings.theme, function(err, renderer){

    if (err) {
      return cb(err);
    }

    self.status("frames");

    drawFrames(renderer, {
      width: self.settings.theme.width,
      height: self.settings.theme.height,
      numFrames: self.numFrames,
      frameDir: self.frameDir,
      caption: self.settings.caption,
      waveform: self.waveform,
      tick: function() {
        transports.incrementField(self.id, "framesComplete");
      }
    }, cb);

  });

};

// Combine the frames and audio into the final video with FFmpeg
Audiogram.prototype.combineFrames = function(cb) {

  this.status("combine");
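
  // "%06d.png" is a printf-style pattern matching the zero-padded frame
  // filenames that drawFrames wrote into frameDir, so FFmpeg reads them
  // in sequence at the theme's frame rate.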

  combineFrames({
    framePath: path.join(this.frameDir, "%06d.png"),
    audioPath: this.audioPath,
    videoPath: this.videoPath,
    framesPerSecond: this.settings.theme.framesPerSecond
  }, cb);

};

// Master render function, queue up steps in order
Audiogram.prototype.render = function(cb) {

  var self = this,
      q = queue(1);
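
  // queue(1) from d3-queue runs the deferred tasks one at a time, in the
  // order they were deferred; the first error skips the remaining tasks and
  // is passed straight to the final await callback.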

  this.status("audio-download");

  // Set up tmp directory
  q.defer(mkdirp, this.frameDir);

  // Download the stored audio file
  q.defer(transports.downloadAudio, "audio/" + this.id, this.audioPath);

  // If the audio needs to be clipped, clip it first and update the path
  if (this.settings.start || this.settings.end) {
    q.defer(this.trimAudio.bind(this), this.settings.start || 0, this.settings.end || null);
  }

  // Get the audio waveform data
  q.defer(this.getWaveform.bind(this));

  // Draw all the frames
  q.defer(this.drawFrames.bind(this));

  // Combine audio and frames together with ffmpeg
  q.defer(this.combineFrames.bind(this));

  // Upload video to S3 or move to local storage
  q.defer(transports.uploadVideo, this.videoPath, "video/" + this.id + ".mp4");

  // Delete working directory
  q.defer(rimraf, this.dir);

  // Final callback, results in a URL where the finished video is accessible
  q.await(function(err){
    if (!err) {
      self.set("url", transports.getURL(self.id));
    }
    logger.debug(self.profiler.print());
    return cb(err);
  });

  return this;

};

Audiogram.prototype.set = function(field, value) {
  logger.debug(field + "=" + value);
  transports.setField(this.id, field, value);
  return this;
};

// Convenience method for .set("status")
Audiogram.prototype.status = function(value) {
  this.profiler.start(value);
  return this.set("status", value);
};

module.exports = Audiogram;
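
For context, here is a minimal sketch of how a worker process might drive this class. The shape of the settings object (theme, caption, start, end) is inferred from the reads above; the job id, the concrete theme values, and the direct settings assignment are illustrative assumptions, not the project's defaults.

var Audiogram = require("./index.js");

// Hypothetical job payload; in the real app the settings arrive from the
// editor via the jobs transport rather than being hard-coded like this.
var audiogram = new Audiogram("example-job-id");

audiogram.settings = {
  caption: "Example caption",
  start: 0,     // optional trim points, in seconds
  end: null,
  theme: {
    width: 1280,
    height: 720,
    framesPerSecond: 20,
    samplesPerFrame: 128,
    maxDuration: 300
  }
};

// render() runs the whole pipeline (download, trim, waveform, frames,
// FFmpeg, upload, cleanup) and calls back with any error; on success the
// video URL has been stored on the job record (see .set("url", ...) above).
audiogram.render(function(err){
  if (err) {
    console.error("Render failed:", err);
  }
});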