Merge pull request #21 from rbouqueau/audio_and_playback_issues
Fix audio and playback issues
jpiesing authored Feb 15, 2021
2 parents 51eb86f + 8586106 commit 5fae3b8
Showing 2 changed files with 62 additions and 54 deletions.
19 changes: 11 additions & 8 deletions encode_dash.py
@@ -46,7 +46,7 @@ def process_mpd(self, DOMTree, mpd):
profiles += "," + chunked_profile
mpd.setAttribute('profiles', profiles)

# Remove ServiceDescrition element if present (somehow ffmpeg 4.3 adds this to the mpd by default, removed for now)
# Remove ServiceDescription element if present (somehow ffmpeg 4.3 adds this to the mpd by default, removed for now)
service_descriptions = mpd.getElementsByTagName("ServiceDescription")
self.remove_element(service_descriptions)

@@ -317,7 +317,8 @@ def __init__(self, representation_config):
print("Supported video sample entries for AVC are \"avc1\" and \"avc3\" and"
" for HEVC \"hev1\" and \"hvc1\".")
sys.exit(1)
self.m_video_sample_entry = value
else:
self.m_video_sample_entry = value
elif name == "cmaf":
self.m_cmaf_profile = value
if value == "avcsd":
@@ -381,10 +382,12 @@ def __init__(self, representation_config):
else:
print("Unknown configuration option for representation: " + name + " , it will be ignored.")

# Sanity checks
if self.m_id is None or self.m_input is None or self.m_media_type is None or self.m_codec is None or \
self.m_bitrate is None or self.m_cmaf_profile is None:
self.m_bitrate is None and self.m_cmaf_profile:
print("For each representation at least the following 6 parameters must be provided: " +
"<representation_id>,<input_file>,<media_type>,<codec>,<bitrate>,<cmaf_profile>")
"<representation_id>{0},<input_file>{1},<media_type>{2},<codec>{3},<bitrate>{4},<cmaf_profile>{5}"\
.format(self.m_id, self.m_input, self.m_media_type, self.m_codec, self.m_bitrate, self.m_cmaf_profile))
sys.exit(1)

def form_command(self, index):
@@ -453,7 +456,7 @@ def generate_log(ffmpeg_path, command):
f.write("CTA Test Content Generation Log (Generated at: " + "'{0}' '{1}'".format(date, time) + ")\n\n\n\n")

f.write("-----------------------------------\n")
f.write("FFMPEG Information:\n")
f.write("FFmpeg Information:\n")
f.write("-----------------------------------\n")
f.write("%s\n\n\n\n" % result.stdout.decode('ascii'))

@@ -471,7 +474,7 @@ def generate_log(ffmpeg_path, command):

# Parse input arguments
# Output MPD: --out="<desired_mpd_name>"
# FFMpeg binary path: --path="path/to/ffmpeg"
# FFmpeg binary path: --path="path/to/ffmpeg"
# Representation configuration: --reps="<rep1_config rep2_config … repN_config>"
# DASHing configuration: --dash="<dash_config>"
def parse_args(args):
@@ -508,7 +511,7 @@ def assert_configuration(configuration):
out_dir = configuration[4]
result = subprocess.run(ffmpeg_path + " -version", shell=True, stdout=PIPE, stderr=PIPE)
if "ffmpeg version" not in result.stdout.decode('ascii'):
print("FFMPEG binary is checked in the \"" + ffmpeg_path + "\" path, but not found.")
print("ffmpeg binary is checked in the \"" + ffmpeg_path + "\" path, but not found.")
sys.exit(1)

if output_file is None:
@@ -561,7 +564,7 @@ def assert_configuration(configuration):
options.append(representation.form_command(str(index_a)))
index_a += 1
else:
print("Media type for a representation denoted by <type> can either be \"v\" or \"video\" fro video media"
print("Media type for a representation denoted by <type> can either be \"v\" or \"video\" for video media"
"or \"a\" or \"audio\" for audio media.")
exit(1)

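For reference, the options documented in the parse_args comments combine into an invocation like the sketch below, mirroring the command that run-all.py constructs; the --dash values, the output directory, and the single representation string are illustrative assumptions rather than part of this commit:

    ./encode_dash.py --path=/usr/bin/ffmpeg --out=stream.mpd --outdir=output/t1 \
        --dash=sd:<segment_duration>,ft:<fragment_type> \
        --reps="id:1,type:video,codec:h264,vse:avc1,cmaf:avchdhf,fps:30,res:768x432,bitrate:900,input:content_files/tos_F1_768x432@30_60.mp4"

Several representations are joined with an escaped pipe (\|), and run-all.py below now appends an AAC audio representation as the last entry.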
97 changes: 51 additions & 46 deletions run-all.py
@@ -7,37 +7,35 @@
import json
import pysftp

# Output: SFTP credentials
host = "dashstorage.upload.akamai.com"
username = "sshacs"


cnopts = pysftp.CnOpts(knownhosts=host)
cnopts.hostkeys = None
basePath = '/129021/dash/WAVE/vectors'
outputFolder = '/129021/dash/WAVE/vectors/'

# TODO: link to params.csv - currently only references Switching Set X1
resolutions = [
['1920x1080',7800, 60, "content_files/tos_O1_3840x2160@60_60.mp4" ],['1920x1080',6000, 60, "content_files/tos_O2_3840x2160@60_60.mp4" ],
['1280x720',4500, 60, "content_files/tos_O3_3840x2160@60_60.mp4" ],['1280x720',3000, 60, "content_files/tos_N1_3200x1800@60_60.mp4" ],
['768x432',1100, 30, "content_files/tos_M1_2560x1440@60_60.mp4" ],['768x432',730, 30, "content_files/tos_L1_1920x1080@60_60.mp4" ]
[
['1920x1080', 6000, 60, "content_files/tos_L2_1920x1080@60_60.mp4" ],
['1920x1080', 4500, 30, "content_files/tos_L1_1920x1080@30_60.mp4" ],
['1280x720' , 3000, 60, "content_files/tos_J1_1280x720@60_60.mp4" ],
['1024x576' , 1500, 30, "content_files/tos_I2_1024x576@30_60.mp4" ],
['1024x576' , 1200, 30, "content_files/tos_I1_1024x576@30_60.mp4" ],
['768x432' , 900 , 30, "content_files/tos_F1_768x432@30_60.mp4" ],
['512x288' , 450 , 30, "content_files/tos_B1_512x288@30_60.mp4" ],
['480x270' , 300 , 15, "content_files/tos_A1_480x270@15_60.mp4" ]
]
]
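# Layout sketch: each inner entry is [resolution, bitrate in kbit/s (assumed), framerate, input file],
# e.g. resolutions[0][0] -> ['1920x1080', 6000, 60, "content_files/tos_L2_1920x1080@60_60.mp4"].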

database = { }
filepath = './database.json'
with pysftp.Connection(host=host, username=username, private_key=os.path.expanduser(os.environ['PASSWORD']), cnopts=cnopts) as sftp:
print("Connection succesfully stablished ... ")
# Switch to a remote directory
sftp.cwd(basePath)
# Print data
database = { }

sftp.get('./database.json', filepath)

if os.path.isfile(filepath):
with open(filepath) as json_file:
database = json.load(json_file)

# Open the input parameter params
with open('params.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
id = "wave_{0}_{1}" # contains
id = "wave_{0}_{1}"
for row in csv_reader:
line_count = line_count + 1
if line_count == 1:
@@ -46,48 +44,55 @@
key = id.format("avc_sets", row[0])
reps = []

for i in range(len(resolutions)):
reps =reps + [{"resolution": resolutions[i][0], "framerate": resolutions[i][2], "bitrate": resolutions[i][1], "input": resolutions[i][3]}]
reps_command += "id:{0},type:v,codec:h264,vse:{1},cmaf:avchdhf,fps:{2},res:{3},bitrate:{4},input:{5},sei:{6},vui_timing:{7}".format(i, row[3], resolutions[i][2], resolutions[i][0], resolutions[i][1], resolutions[i][3], row[1].capitalize(), row[2].capitalize())
if i != len(resolutions) -1:
reps_command += "\|"
for i in range(len(resolutions[0])):
reps += [{"resolution": resolutions[0][i][0], "framerate": resolutions[0][i][2], "bitrate": resolutions[0][i][1], "input": resolutions[0][i][3]}]
codec="h264"
cmaf_profile="avchdhf"
reps_command += "id:{0},type:video,codec:{1},vse:{2},cmaf:{3},fps:{4},res:{5},bitrate:{6},input:{7},sei:{8},vui_timing:{9}"\
.format(i, codec, row[3], cmaf_profile, resolutions[0][i][2], resolutions[0][i][0], resolutions[0][i][1], resolutions[0][i][3], row[1].capitalize(), row[2].capitalize())

reps_command += "\|"

#add audio
reps_command += "id:{0},type:audio,codec:aac,bitrate:{1},input:{2}"\
.format(len(resolutions[0])+1, resolutions[0][i][1], resolutions[0][i][3])

database[key] = {
'representations': reps,
'segmentDuration': row[5],
'fragmentType': row[7],
'hasSEI': row[1].lower() == 'true',
'hasVUITiming': row[2].lower()== 'true',
'visualSampleEntry': row[3],
'segmentDuration': row[5],
'fragmentType': row[7],
'hasSEI': row[1].lower() == 'true',
'hasVUITiming': row[2].lower() == 'true',
'visualSampleEntry': row[3],
'mpdPath': 'avc_sets/{0}/stream.mpd'.format(row[0])
}
command = "./encode_dash.py --path=/usr/local/bin/ffmpeg --out=stream.mpd --outdir=output/{0} --dash=sd:{1},ft:{2} {3}".format(row[0], row[5], row[7], reps_command)

command = "./encode_dash.py --path=/usr/bin/ffmpeg --out=stream.mpd --outdir=output/{0} --dash=sd:{1},ft:{2} {3}".format(row[0], row[5], row[7], reps_command)
print("Executing " + command)
# result = subprocess.run(command, shell=True)

with open(filepath, 'w') as outfile:
json.dump(database, outfile)
result = subprocess.run(command, shell=True)

# Write the database to a file
with open(filepath, 'w') as outfile:
json.dump(database, outfile)
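
For reference, each entry written to database.json then has roughly the shape sketched here, with placeholders standing in for the params.csv row values:

    database["wave_avc_sets_<stream_id>"] == {
        'representations': [{'resolution': '1920x1080', 'framerate': 60, 'bitrate': 6000,
                             'input': 'content_files/tos_L2_1920x1080@60_60.mp4'}, ...],
        'segmentDuration': '<row[5]>', 'fragmentType': '<row[7]>',
        'hasSEI': True, 'hasVUITiming': True, 'visualSampleEntry': '<row[3]>',
        'mpdPath': 'avc_sets/<stream_id>/stream.mpd'
    }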

with pysftp.Connection(host=host, username=username, private_key=os.path.expanduser(os.environ['AKAMAI_PRIVATE_KEY']), cnopts=cnopts) as sftp:
print("Connection succesfully stablished ... ")
# Switch to a remote directory
sftp.cwd(basePath)
print("Connection successfully established ... ")


sftp.put(filepath, basePath + '/database.json')
# Switch to a remote directory and put the data base
sftp.cwd(outputFolder)
sftp.put(filepath, outputFolder + filepath)

# Create the directory structure if it does not exist
for root, dirs, files in os.walk('./output', topdown=True):
for root, dirs, files in os.walk('./output', topdown=True):
for name in dirs:
p = os.path.join(root ,name).replace('./output',basePath + '/avc_sets')
p = os.path.join(root ,name).replace('./output', outputFolder + 'avc_sets')
if not sftp.isfile(p):
print("Creating directory " + p)
sftp.mkdir(p, mode=644)


# Put the files
for root, dirs, files in os.walk('./output', topdown=True):
for root, dirs, files in os.walk('./output', topdown=True):
for name in files:
dest = os.path.join(root ,name).replace('./output',basePath + '/avc_sets')
print("upload file " + os.path.join(root ,name) + " to " + dest)
sftp.put(os.path.join(root ,name), dest, callback=lambda x,y: print("{} transfered out of {}".format(x,y)))
dest = os.path.join(root ,name).replace('./output', outputFolder + 'avc_sets')
print("Upload file " + os.path.join(root ,name) + " to " + dest)
sftp.put(os.path.join(root ,name), dest, callback=lambda x,y: print("{} transferred out of {}".format(x,y)))
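
With the credential handling above, a local run could look like the following sketch; the key location and the python3 launcher are assumptions (the script only requires AKAMAI_PRIVATE_KEY to point at a private key path, which it passes through os.path.expanduser):

    export AKAMAI_PRIVATE_KEY=~/.ssh/akamai_sftp_key
    python3 run-all.py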
