Turn audio into a shareable video. Forked from nypublicradio/audiogram.

index.js 4.6KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202
  1. var path = require("path"),
  2. queue = require("d3").queue,
  3. mkdirp = require("mkdirp"),
  4. rimraf = require("rimraf"),
  5. serverSettings = require("../settings/"),
  6. transports = require("../lib/transports/"),
  7. logger = require("../lib/logger/"),
  8. Profiler = require("../lib/profiler.js"),
  9. probe = require("../lib/probe.js"),
  10. getWaveform = require("./waveform.js"),
  11. initializeCanvas = require("./initialize-canvas.js"),
  12. drawFrames = require("./draw-frames.js"),
  13. combineFrames = require("./combine-frames.js"),
  14. trimAudio = require("./trim.js");
  15. function Audiogram(settings) {
  16. // Unique audiogram ID
  17. this.id = settings.id;
  18. this.settings = settings;
  19. // File locations to use
  20. this.dir = path.join(serverSettings.workingDirectory, this.id);
  21. this.audioPath = path.join(this.dir, "audio");
  22. this.videoPath = path.join(this.dir, "video.mp4");
  23. this.frameDir = path.join(this.dir, "frames");
  24. this.profiler = new Profiler();
  25. return this;
  26. }
  27. // Get the waveform data from the audio file, split into frames
  28. Audiogram.prototype.getWaveform = function(cb) {
  29. var self = this;
  30. this.status("probing");
  31. probe(this.audioPath, function(err, data){
  32. if (err) {
  33. return cb(err);
  34. }
  35. if (self.settings.maxDuration && self.settings.maxDuration < data.duration) {
  36. return cb("Exceeds max duration of " + self.settings.maxDuration + "s");
  37. }
  38. self.profiler.size(data.duration);
  39. self.set("numFrames", self.numFrames = Math.floor(data.duration * self.settings.framesPerSecond));
  40. self.status("waveform");
  41. getWaveform(self.audioPath, {
  42. numFrames: self.numFrames,
  43. samplesPerFrame: self.settings.samplesPerFrame
  44. }, function(waveformErr, waveform){
  45. return cb(waveformErr, self.settings.waveform = waveform);
  46. });
  47. });
  48. };
  49. // Trim the audio by the start and end time specified
  50. Audiogram.prototype.trimAudio = function(start, end, cb) {
  51. var self = this;
  52. this.status("trim");
  53. // FFmpeg needs an extension to sniff
  54. var trimmedPath = this.audioPath + "-trimmed.mp3";
  55. trimAudio({
  56. origin: this.audioPath,
  57. destination: trimmedPath,
  58. startTime: start,
  59. endTime: end
  60. }, function(err){
  61. if (err) {
  62. return cb(err);
  63. }
  64. self.audioPath = trimmedPath;
  65. return cb(null);
  66. });
  67. };
  68. // Initialize the canvas and draw all the frames
  69. Audiogram.prototype.drawFrames = function(cb) {
  70. var self = this;
  71. this.status("renderer");
  72. initializeCanvas(this.settings, function(err, renderer){
  73. if (err) {
  74. return cb(err);
  75. }
  76. self.status("frames");
  77. drawFrames(renderer, {
  78. numFrames: self.numFrames,
  79. frameDir: self.frameDir,
  80. tick: function() {
  81. transports.incrementField(self.id, "framesComplete");
  82. }
  83. }, cb);
  84. });
  85. };
  86. // Combine the frames and audio into the final video with FFmpeg
  87. Audiogram.prototype.combineFrames = function(cb) {
  88. this.status("combine");
  89. combineFrames({
  90. framePath: path.join(this.frameDir, "%06d.png"),
  91. audioPath: this.audioPath,
  92. videoPath: this.videoPath,
  93. framesPerSecond: this.settings.framesPerSecond
  94. }, cb);
  95. };
  96. // Master render function, queue up steps in order
  97. Audiogram.prototype.render = function(cb) {
  98. var self = this,
  99. q = queue(1);
  100. this.status("audio-download");
  101. // Set up tmp directory
  102. q.defer(mkdirp, this.frameDir);
  103. // Download the stored audio file
  104. q.defer(transports.downloadAudio, "audio/" + this.id, this.audioPath);
  105. // If the audio needs to be clipped, clip it first and update the path
  106. if (this.settings.start || this.settings.end) {
  107. q.defer(this.trimAudio.bind(this), this.settings.start || 0, this.settings.end);
  108. }
  109. // Get the audio waveform data
  110. q.defer(this.getWaveform.bind(this));
  111. // Draw all the frames
  112. q.defer(this.drawFrames.bind(this));
  113. // Combine audio and frames together with ffmpeg
  114. q.defer(this.combineFrames.bind(this));
  115. // Upload video to S3 or move to local storage
  116. q.defer(transports.uploadVideo, this.videoPath, "video/" + this.id + ".mp4");
  117. // Delete working directory
  118. q.defer(rimraf, this.dir);
  119. // Final callback, results in a URL where the finished video is accessible
  120. q.await(function(err){
  121. if (!err) {
  122. self.set("url", transports.getURL(self.id));
  123. }
  124. logger.debug(self.profiler.print());
  125. return cb(err);
  126. });
  127. return this;
  128. };
  129. Audiogram.prototype.set = function(field, value) {
  130. logger.debug(field + "=" + value);
  131. transports.setField(this.id, field, value);
  132. return this;
  133. };
  134. // Convenience method for .set("status")
  135. Audiogram.prototype.status = function(value) {
  136. this.profiler.start(value);
  137. return this.set("status", value);
  138. };
  139. module.exports = Audiogram;