Rob Jaret committed
Commit 1f7fc9d · Parent(s): 811fa16
Uploading app.

Files changed:
- .gitattributes +3 -0
- README.md +14 -0
- app.py +281 -0
- assets/.DS_Store +0 -0
- assets/BirdCalls.mp3 +3 -0
- assets/Chimes.wav +3 -0
- assets/FrenchChildren.wav +3 -0
- assets/GesturesPercStrings.wav +3 -0
- assets/Organ-ND.wav +3 -0
- assets/SilverCaneAbbey-Voices.wav +3 -0
- assets/SingingBowl-OmniMic.wav +3 -0
- assets/SpigotsOfChateauLEtoge.wav +3 -0
- assets/Stylophone.wav +3 -0
- requirements.txt +10 -0
    	
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.m4a filter=lfs diff=lfs merge=lfs -text
    	
README.md
CHANGED
@@ -10,3 +10,17 @@ pinned: false
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+Built using:
+macOS Sequoia 15.5
+Python 3.12
+
+Some observations:
+- If all the parameters can be averaged, the result is usually a high-pitched squeal or a low rumble.
+
+Outstanding questions for any interested parties:
+- Since it doesn't work well when all params are compatible, are there some params that shouldn't be averaged to keep the resulting model functional?
+- Would it make logical sense to reshape the parameters that exist in both models but do not have the same shape so they can be averaged?
+- Anything else that could make the results sonically more like an average of two models?
+
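The reshape question above invites a concrete sketch. Here is one minimal, hypothetical illustration (not part of this commit), assuming PyTorch tensors of equal rank: average only the slice where the two shapes overlap and keep the rest of model A's tensor. Whether a module stays functional after such partial blending is exactly the open question.

import torch

def blend_overlap(a: torch.Tensor, b: torch.Tensor, w: float = 0.5) -> torch.Tensor:
    # Hypothetical helper: blend a and b only over the region where their
    # shapes overlap, leaving the remainder of `a` untouched.
    # Assumes a.dim() == b.dim().
    out = a.clone()
    region = tuple(slice(0, min(da, db)) for da, db in zip(a.shape, b.shape))
    out[region] = (1 - w) * a[region] + w * b[region]
    return out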
    	
app.py
ADDED
@@ -0,0 +1,281 @@
import copy
import os

import gradio as gr
import huggingface_hub
import librosa
import pandas as pd
import torch

# (repo_id, filename) pairs for the available RAVE models
model_path_configs = {
    "Humpback Whales":    ("Intelligent-Instruments-Lab/rave-models", "humpbacks_pondbrain_b2048_r48000_z20.ts"),
    "Magnets":            ("Intelligent-Instruments-Lab/rave-models", "magnets_b2048_r48000_z8.ts"),
    "Big Ensemble":       ("Intelligent-Instruments-Lab/rave-models", "crozzoli_bigensemblesmusic_18d.ts"),
    "Bird Dawn Chorus":   ("Intelligent-Instruments-Lab/rave-models", "birds_dawnchorus_b2048_r48000_z8.ts"),
    "Speaking & Singing": ("Intelligent-Instruments-Lab/rave-models", "voice-multi-b2048-r48000-z11.ts"),
    "Resonator Piano":    ("Intelligent-Instruments-Lab/rave-models", "mrp_strengjavera_b2048_r44100_z16.ts"),
    "Multimbral Guitar":  ("Intelligent-Instruments-Lab/rave-models", "guitar_iil_b2048_r48000_z16.ts"),
    "Organ Archive":      ("Intelligent-Instruments-Lab/rave-models", "organ_archive_b2048_r48000_z16.ts"),
    "Water":              ("Intelligent-Instruments-Lab/rave-models", "water_pondbrain_b2048_r48000_z16.ts"),
    "Brass Sax":          ("shuoyang-zheng/jaspers-rave-models", "aam_brass_sax_b2048_r44100_z8_noncausal.ts"),
    "Speech":             ("shuoyang-zheng/jaspers-rave-models", "librispeech100_b2048_r44100_z8_noncausal.ts"),
    "String":             ("shuoyang-zheng/jaspers-rave-models", "aam_string_b2048_r44100_z16_noncausal.ts"),
    "Singer":             ("shuoyang-zheng/jaspers-rave-models", "gtsinger_b2048_r44100_z16_noncausal.ts"),
    "Bass":               ("shuoyang-zheng/jaspers-rave-models", "aam_bass_b2048_r44100_z16_noncausal.ts"),
    "Drum":               ("shuoyang-zheng/jaspers-rave-models", "aam_drum_b2048_r44100_z16_noncausal.ts"),
    "Gtr Picking":        ("shuoyang-zheng/jaspers-rave-models", "guitar_picking_dm_b2048_r44100_z8_causal.ts"),
}

# Bundled sample audio files (in the assets folder)
available_audio_files = [
    "SilverCaneAbbey-Voices.wav",
    "Chimes.wav",
    "FrenchChildren.wav",
    "Organ-ND.wav",
    "SpigotsOfChateauLEtoge.wav",
    "GesturesPercStrings.wav",
    "SingingBowl-OmniMic.wav",
    "BirdCalls.mp3",
]

model_path_config_keys = sorted(model_path_configs)
model_paths_cache = {}
def GetModelPath(model_path_name):
    # Download the model from the Hugging Face Hub on first use and cache
    # the resulting local path for subsequent calls.
    if model_path_name in model_paths_cache:
        return model_paths_cache[model_path_name]

    repo_id, filename = model_path_configs[model_path_name]
    model_path = huggingface_hub.hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        cache_dir="../huggingface_hub_cache",
        force_download=False,
    )
    print(f"Generated model path for {filename}.")
    model_paths_cache[model_path_name] = model_path
    return model_path

def saveAudio(file_path, audio):
    # Write raw audio bytes to <file_path>.wav.
    with open(file_path + '.wav', 'wb') as f:
        f.write(audio.data)
def AverageRaveModels(rave_a, rave_b, bias=0):
    # Returns a copy of rave_a whose matching parameters are a weighted
    # average of rave_a's and rave_b's, plus a messages dict describing
    # which keys were (and were not) averaged.
    r1_ratio = 0.5
    r2_ratio = 0.5

    messages = {}
    # bias is between -1 and 1: 0 = equal weights, >0 favors rave_a,
    # <0 favors rave_b
    if abs(bias) <= 1:
        if bias > 0:
            r1_ratio = 0.5 + bias / 2
            r2_ratio = 1.0 - r1_ratio
        elif bias < 0:
            r2_ratio = 0.5 + abs(bias) / 2
            r1_ratio = 1.0 - r2_ratio
    else:
        print(f"Unable to apply bias {bias} - bias must be between -1 and 1.")

    # Get the state dictionaries of both models
    rave_a_params = rave_a.state_dict()
    rave_b_params = rave_b.state_dict()

    # Initialize the averaged model as a deep copy of model A
    rave_avg = copy.deepcopy(rave_a)
    avg = rave_avg.state_dict()

    # For reporting
    keys_averaged = {}
    keys_not_averaged = {}
    for key in rave_a_params:
        if key in rave_b_params:
            try:
                avg[key] = (rave_a_params[key] * r1_ratio) + (rave_b_params[key] * r2_ratio)
                keys_averaged[key] = (key, rave_a_params[key].shape, rave_b_params[key].shape, "")
            except Exception as e:
                print(f"Error averaging key {key}: {e}")
                keys_not_averaged[key] = (key, rave_a_params[key].shape, rave_b_params[key].shape, e)
        else:
            print(f"Key {key} not found in rave_b parameters, skipping.")
            keys_not_averaged[key] = (key, rave_a_params[key].shape, "n/a", "Key not found in rave_b parameters.")

    messages["keys_averaged"] = keys_averaged
    messages["keys_not_averaged"] = keys_not_averaged

    messages["stats"] = (
        f'Num Params Averaged: {len(keys_averaged)}\n'
        f'Num Params Unable to Average: {len(keys_not_averaged)}\n'
        f'Percent Averaged: {len(keys_averaged) * 100 / (len(keys_not_averaged) + len(keys_averaged)):5.2f}%'
    )

    # Commit the averaged parameters back into the copied model
    rave_avg.load_state_dict(avg)

    return rave_avg, messages
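# For concreteness: with bias=0.5, r1_ratio becomes 0.75 and r2_ratio 0.25,
# so the merged weights lean 3:1 towards model A. A hypothetical direct use
# (a sketch, not exercised by the app):
#   rave_a = torch.jit.load(GetModelPath("Water"))
#   rave_b = torch.jit.load(GetModelPath("Organ Archive"))
#   rave_avg, msgs = AverageRaveModels(rave_a, rave_b, bias=0.5)
#   print(msgs["stats"])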
def GenerateRaveEncDecAudio(model_name_a, model_name_b, audio_file_name, audio_file, sr_multiple=1, bias=0):
    ###############################################
    # Choose models from the model_path_configs dictionary above.
    # Note: model_a is always used to initialize the averaged model, so
    # swapping A and B gets different results if the parameters are not
    # all matched.
    ###############################################
    # Examples:
    # - Some pairs match only 21 params, yet the result still sounds like
    #   it contains some of both.
    # - 'Water' + 'Organ Archive' averages 76 params.
    # - 'Organ Archive' + 'Multimbral Guitar' and 'String' + 'Singer' match
    #   all params, but the averaged version is high-pitched.
    # - 'Humpback Whales' + 'Water' match all params, but give a
    #   lower-frequency effect.
    model_path_a = GetModelPath(model_name_a)
    model_path_b = GetModelPath(model_name_b)

    #####################################
    # Bias is between -1 and 1 and skews the result towards one of the
    # models: 0 = no bias; >0 = biased towards model_a; <0 = biased towards model_b.
    # Note: multiple biases are not implemented in this Gradio version.
    #####################################
    biases = [bias]

    #####################################
    # Choose the audio file to encode/decode: a bundled asset unless the
    # user uploaded or recorded one.
    #####################################
    if audio_file is None:
        audio_file = os.path.join('assets', audio_file_name)

    rave_a = torch.jit.load(model_path_a)
    rave_b = torch.jit.load(model_path_b)

    # Load the sample audio file
    y, sr = librosa.load(audio_file)

    sr_multiplied = sr * sr_multiple  # adjusts playback rate of the averaged output
    print(f"Audio File Loaded: {audio_file}, sample_rate = {sr}")

    # Convert the audio to a PyTorch tensor and reshape it to the
    # required shape: (batch_size, n_channels, n_samples)
    audio = torch.from_numpy(y).float()
    audio = audio.reshape(1, 1, -1)

    messages = {}
    audio_outputs = {}
    for bias in list(biases):  # iterate over a copy so failed biases can be removed
        # Average the rave models. The bias is negated so that positive
        # slider values bias the result towards Model B.
        rave_avg, new_msgs = AverageRaveModels(rave_a, rave_b, (-1 * bias))
        messages |= new_msgs

        # Now encode and decode with the original and averaged models.
        with torch.no_grad():
            try:
                latent_a = rave_a.encode(audio)
                latent_b = rave_b.encode(audio)
                latent_avg = rave_avg.encode(audio)

                # decode the individual and averaged latents back to audio
                decoded_a = rave_a.decode(latent_a)
                decoded_b = rave_b.decode(latent_b)
                decoded_avg = rave_avg.decode(latent_avg)
                audio_outputs[bias] = decoded_avg[0]
            except Exception as e:
                print(f'Bias {bias} generated an error ({e}). Removing it from the list of biases.')
                biases.remove(bias)

        # The code below assumes the encode/decode above succeeded.
        original_audio = (sr, y)

        print("Encoded and decoded using the original models")
        model_a_audio = (sr, decoded_a[0].detach().numpy().squeeze())
        model_b_audio = (sr, decoded_b[0].detach().numpy().squeeze())

        print("Encoded and decoded using the averaged model")
        print("with biases:", biases)
        print("\nNumber of params able to average:", len(messages["keys_averaged"]))
        print("Number of params unable to average:", len(messages["keys_not_averaged"]))

        bias = biases[0]
        averaged_audio = (sr_multiplied, audio_outputs[bias].detach().numpy().squeeze())

        df_averaged = pd.DataFrame(messages['keys_averaged']).transpose()
        df_averaged.columns = ['Param Name', 'Model A Shape', 'Model B Shape', 'Errors']

        df_not_averaged = pd.DataFrame(messages["keys_not_averaged"]).transpose()

        # Case when all params were averaged: build an empty frame so the
        # column assignment below still works.
        if len(df_not_averaged.columns) == 0:
            df_not_averaged = pd.DataFrame({'Param Name': [], 'Model A Shape': [], 'Model B Shape': [], 'Errors': []})

        df_not_averaged.columns = ['Param Name', 'Model A Shape', 'Model B Shape', 'Errors']

        messages["stats"] = (
            f"Model A: {model_name_a}\n"
            f"Model B: {model_name_b}\n"
            f"Audio file: {os.path.basename(audio_file)}\n"
            f"Sample Rate Multiple for Averaged Version: {sr_multiple}\n\n"
            + messages["stats"]
        )

        return original_audio, model_a_audio, model_b_audio, averaged_audio, messages["stats"], df_averaged, df_not_averaged
         | 
| 249 | 
            +
            import gradio as gr
         | 
| 250 | 
            +
             | 
| 251 | 
            +
            waveform_options = gr.WaveformOptions(waveform_color="#01C6FF", 
         | 
| 252 | 
            +
                                                                 waveform_progress_color="#0066B4",
         | 
| 253 | 
            +
                                                                 skip_length=2,)
         | 
| 254 | 
            +
            column_widths=['35%', '20%', '20%', '25%']
         | 
| 255 | 
            +
             | 
| 256 | 
            +
            AverageModels = gr.Interface(title="Process Audio Through Averaged Models.",
         | 
| 257 | 
            +
                fn=GenerateRaveEncDecAudio,
         | 
| 258 | 
            +
                inputs=[
         | 
| 259 | 
            +
                    gr.Radio(model_path_config_keys, label="Select Model A", value="Multimbral Guitar", container=True),
         | 
| 260 | 
            +
                    gr.Radio(model_path_config_keys, label="Select Model B", value="Water", container=True),
         | 
| 261 | 
            +
                    gr.Dropdown(available_audio_files, label="Select from these audio files or upload your own below:", value="SilverCaneAbbey-Voices.wav",container=True),
         | 
| 262 | 
            +
                    gr.Audio(label="Upload an audio file (wav)", type="filepath", sources=["upload", "microphone"], max_length=60,
         | 
| 263 | 
            +
                            waveform_options=waveform_options, format='wav'),
         | 
| 264 | 
            +
                    gr.Radio([.2, .5, .75, 1, 2, 4], label="Sample Rate Multiple (Averaged version only)", value=1, container=True),
         | 
| 265 | 
            +
                    gr.Slider(label="Bias towards Model A or B", minimum=-1, maximum=1, value=0, step=0.1, container=True),
         | 
| 266 | 
            +
                    
         | 
| 267 | 
            +
                    ],
         | 
| 268 | 
            +
                # if no way to pass dictionary, pass separate keys and values and zip them.
         | 
| 269 | 
            +
                outputs=[
         | 
| 270 | 
            +
                    gr.Audio(label="Original Audio", sources=None, waveform_options=waveform_options, interactive=False),
         | 
| 271 | 
            +
                    gr.Audio(label="Encoded/Decoded through Model A", sources=None, waveform_options=waveform_options,),
         | 
| 272 | 
            +
                    gr.Audio(label="Encoded/Decoded through Model B", sources=None, waveform_options=waveform_options,),
         | 
| 273 | 
            +
                    gr.Audio(label="Encoded/Decoded through averaged model", sources=None, waveform_options=waveform_options,),
         | 
| 274 | 
            +
                    gr.Textbox(label="Stats"),
         | 
| 275 | 
            +
                    gr.Dataframe(label="Params Averaged", show_copy_button="True", scale=100, column_widths=column_widths, headers=['Param Name', 'Model A Shape', 'Model B Shape', 'Errors']),
         | 
| 276 | 
            +
                    gr.Dataframe(label="Params Not Averaged", show_copy_button="True", scale=100, column_widths=column_widths, headers=['Param Name', 'Model A Shape', 'Model B Shape', 'Errors'])
         | 
| 277 | 
            +
                    ]
         | 
| 278 | 
            +
                ,fill_width=True
         | 
| 279 | 
            +
            )
         | 
| 280 | 
            +
             | 
| 281 | 
            +
            AverageModels.launch(max_file_size=10 * gr.FileSize.MB, share=True)
         | 
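On the README's question of params that perhaps shouldn't be averaged: when merging state dicts it is common to leave buffers such as normalization running statistics untouched, since blending them can destabilize a model. A hypothetical variant of AverageRaveModels along those lines (a sketch, not part of the commit; the key-name substrings are assumptions about how such buffers are typically named):

import copy
import torch

def average_except(rave_a, rave_b,
                   skip_substrings=("running_mean", "running_var", "num_batches_tracked")):
    # Average matching floating-point parameters 50/50, but keep model A's
    # value for any key whose name contains one of skip_substrings.
    a, b = rave_a.state_dict(), rave_b.state_dict()
    merged = copy.deepcopy(rave_a)
    out = merged.state_dict()
    for key, value in a.items():
        if (key in b and value.is_floating_point()
                and value.shape == b[key].shape
                and not any(s in key for s in skip_substrings)):
            out[key] = 0.5 * value + 0.5 * b[key]
    merged.load_state_dict(out)
    return merged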
    	
assets/.DS_Store
ADDED
Binary file (6.15 kB)
    	
assets/BirdCalls.mp3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d38844d6abf337397f58fe4abb33e97a805ab33c570856da6cbeec5e4b3ce6d3
+size 1054464

assets/Chimes.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01f3b697316f78e2fdaa4584fa25cdf66c9e0a2c6a7504e9a7a9cedc8e30a596
+size 4267712

assets/FrenchChildren.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a50753cf3d99baa5ebdefda6972b8f112c39b30eabee74f7b5b1da9c65cd3e2c
+size 2712908

assets/GesturesPercStrings.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d503a73fcb5223744ed421de6d5842945ddc5fcebf6ba5077954854e44e697d1
+size 9817514

assets/Organ-ND.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20d16777d58088f5e7c314bccbff40a142ed54481decbeb0c33f001aef1adbc2
+size 7310666

assets/SilverCaneAbbey-Voices.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a142c9f0e3783e8930d4df3b481a83d2753c97489fc4031b983fbebece2afbf
+size 2790688

assets/SingingBowl-OmniMic.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dc0800e4d28b98928f8c8552bfa53f0e57e80ee0a050de78353fdeb2472bc3b
+size 3677380

assets/SpigotsOfChateauLEtoge.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7b5fe1427a61265dbaae9724478b9512b538799b589180ceea900d9051e03c8
+size 4332398

assets/Stylophone.wav
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:419ae9cd13e815ecdab0400c81b744c716346fe9aa9afec0d28a66892ceabbcb
+size 3851504
    	
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+ipykernel==6.29.5
+numpy==2.2.5
+transformers==4.51.3
+torch==2.7.0
+torchaudio==2.7.0
+librosa==0.11.0
+torchinfo @ git+https://github.com/lancelotblanchard/torchinfo@87dd4eb
+pandas==2.2.3
+ffmpeg==1.4
+ffprobe==0.5