Upload folder using huggingface_hub
- feature_extractor_clip/preprocessor_config.json +27 -0
- feature_extractor_vae/preprocessor_config.json +19 -0
- model_index.json +119 -0
- scheduler/scheduler_config.json +14 -0
- text_encoder/config.json +25 -0
- text_encoder/pytorch_model.bin +3 -0
- tokenizer/merges.txt +0 -0
- tokenizer/special_tokens_map.json +24 -0
- tokenizer/tokenizer_config.json +33 -0
- tokenizer/vocab.json +0 -0
- unet/config.json +67 -0
- unet/diffusion_pytorch_model.bin +3 -0
- vae/config.json +31 -0
- vae/diffusion_pytorch_model.bin +3 -0
- vision_encoder/config.json +23 -0
- vision_encoder/pytorch_model.bin +3 -0
    	
feature_extractor_clip/preprocessor_config.json
ADDED

@@ -0,0 +1,27 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
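This processor resizes the conditioning image to a 224-pixel shortest edge, center-crops to 224x224, and applies the standard CLIP mean/std. A minimal loading sketch with transformers; the repo path and image file are placeholders:

from PIL import Image
from transformers import CLIPImageProcessor

# "<repo-or-local-path>" stands in for this repository's id or a local checkout.
processor = CLIPImageProcessor.from_pretrained(
    "<repo-or-local-path>", subfolder="feature_extractor_clip"
)
inputs = processor(images=Image.open("cond.png"), return_tensors="pt")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224), per crop_size above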
    	
feature_extractor_vae/preprocessor_config.json
ADDED

@@ -0,0 +1,19 @@
+{
+  "crop_size": {
+    "height": 512,
+    "width": 512
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": 0.5,
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": 0.8,
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 512
+  }
+}
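Unlike the CLIP-side processor, this one prepares the 512x512 input for the VAE and uses scalar mean 0.5 and std 0.8. Combined with the rescale factor (1/255), a pixel value p maps to (p/255 - 0.5)/0.8, i.e. into the range [-0.625, 0.625]. A small sketch verifying the arithmetic, using only the values from the config above:

# Reproduce the feature_extractor_vae normalization by hand.
rescale_factor = 0.00392156862745098  # 1/255
mean, std = 0.5, 0.8

def normalize(pixel: int) -> float:
    return (pixel * rescale_factor - mean) / std

print(normalize(0), normalize(255))  # -0.625 0.625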
    	
model_index.json
ADDED

@@ -0,0 +1,119 @@
+{
+  "_class_name": "Zero123PlusPipeline",
+  "_diffusers_version": "0.17.1",
+  "feature_extractor_clip": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "feature_extractor_vae": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "ramping_coefficients": [
+    0.0,
+    0.2060057818889618,
+    0.18684479594230652,
+    0.24342191219329834,
+    0.18507817387580872,
+    0.1703828126192093,
+    0.15628913044929504,
+    0.14174538850784302,
+    0.13617539405822754,
+    0.13569170236587524,
+    0.1269884556531906,
+    0.1200924888253212,
+    0.12816639244556427,
+    0.13058121502399445,
+    0.14201879501342773,
+    0.15004529058933258,
+    0.1620427817106247,
+    0.17207716405391693,
+    0.18534132838249207,
+    0.20002241432666779,
+    0.21657466888427734,
+    0.22996725142002106,
+    0.24613411724567413,
+    0.25141021609306335,
+    0.26613450050354004,
+    0.271847128868103,
+    0.2850190997123718,
+    0.285749226808548,
+    0.2813953757286072,
+    0.29509517550468445,
+    0.30109965801239014,
+    0.31370124220848083,
+    0.3134534955024719,
+    0.3108579218387604,
+    0.32147032022476196,
+    0.33548328280448914,
+    0.3301997184753418,
+    0.3254660964012146,
+    0.3514464199542999,
+    0.35993096232414246,
+    0.3510829508304596,
+    0.37661612033843994,
+    0.3913513123989105,
+    0.42122599482536316,
+    0.3954688012599945,
+    0.4260983467102051,
+    0.479139506816864,
+    0.4588979482650757,
+    0.4873477816581726,
+    0.5095643401145935,
+    0.5133851170539856,
+    0.520708441734314,
+    0.5363377928733826,
+    0.5661528706550598,
+    0.5859065651893616,
+    0.6207258701324463,
+    0.6560986638069153,
+    0.6379964351654053,
+    0.6777164340019226,
+    0.6589891910552979,
+    0.7574057579040527,
+    0.7446827292442322,
+    0.7695522308349609,
+    0.8163619041442871,
+    0.9502472281455994,
+    0.9918442368507385,
+    0.9398387670516968,
+    1.005432367324829,
+    0.9295969605445862,
+    0.9899859428405762,
+    1.044832706451416,
+    1.0427014827728271,
+    1.0829696655273438,
+    1.0062562227249146,
+    1.0966323614120483,
+    1.0550328493118286,
+    1.2108079195022583
+  ],
+  "safety_checker": [
+    null,
+    null
+  ],
+  "scheduler": [
+    "diffusers",
+    "EulerAncestralDiscreteScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ],
+  "vision_encoder": [
+    "transformers",
+    "CLIPVisionModelWithProjection"
+  ]
+}
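model_index.json wires the named components (plus the per-timestep ramping_coefficients) into a Zero123PlusPipeline. That class is not part of stock diffusers 0.17.1, so loading presumably requires pointing custom_pipeline at wherever the pipeline code lives; a hedged sketch with placeholder identifiers:

import torch
from diffusers import DiffusionPipeline

# Both identifiers below are placeholders: the first for this repository
# (or a local checkout), the second for the repo/module that actually ships
# the Zero123PlusPipeline implementation.
pipe = DiffusionPipeline.from_pretrained(
    "<this-repo-id-or-local-path>",
    custom_pipeline="<repo-with-zero123plus-pipeline-code>",
    torch_dtype=torch.float16,
)
pipe.to("cuda")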
    	
scheduler/scheduler_config.json
ADDED

@@ -0,0 +1,14 @@
+{
+  "_class_name": "EulerAncestralDiscreteScheduler",
+  "_diffusers_version": "0.17.1",
+  "beta_end": 0.012,
+  "beta_schedule": "linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "num_train_timesteps": 1000,
+  "prediction_type": "v_prediction",
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "trained_betas": null
+}
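Note that prediction_type is "v_prediction", as in Stable Diffusion 2, rather than the epsilon default. A sketch of instantiating the scheduler from this subfolder (placeholder path):

from diffusers import EulerAncestralDiscreteScheduler

scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    "<repo-or-local-path>", subfolder="scheduler"
)
print(scheduler.config.prediction_type)  # "v_prediction"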
    	
text_encoder/config.json
ADDED

@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "D:\\.cache\\huggingface\\hub\\models--stabilityai--stable-diffusion-2\\snapshots\\1e128c8891e52218b74cde8f26dbfc701cb99d79\\text_encoder",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_size": 1024,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 23,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float16",
+  "transformers_version": "4.29.0",
+  "vocab_size": 49408
+}
    	
text_encoder/pytorch_model.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2a06cf32cf585d03b55fef302142a5321b761ec440113925f64f4ceaffc7730
+size 680904225
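The three lines above are a Git LFS pointer, not the weights themselves: the sha256 oid identifies the blob and size is its byte count (about 681 MB here). A minimal sketch parsing such a pointer, assuming a checkout where LFS has not yet replaced it with the real file:

def parse_lfs_pointer(path: str) -> dict:
    # Each pointer line is "key value"; the keys are version, oid, size.
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("text_encoder/pytorch_model.bin")
print(ptr["oid"], ptr["size"])  # sha256:f2a0... 680904225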
    	
tokenizer/merges.txt
ADDED

The diff for this file is too large to render; see the raw file.
    	
tokenizer/special_tokens_map.json
ADDED

@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "!",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
    	
tokenizer/tokenizer_config.json
ADDED

@@ -0,0 +1,33 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
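Together with text_encoder/config.json above (hidden_size 1024, max_position_embeddings 77), this tokenizer pads prompts to 77 tokens, so text embeddings come out with shape (batch, 77, 1024). A sketch, with the repo path as a placeholder:

import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("<repo-or-local-path>", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained("<repo-or-local-path>", subfolder="text_encoder")

tokens = tokenizer(
    "a photo of a chair",  # hypothetical prompt
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77, per model_max_length above
    return_tensors="pt",
)
with torch.no_grad():
    embeddings = text_encoder(tokens.input_ids).last_hidden_state
print(embeddings.shape)  # torch.Size([1, 77, 1024])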
    	
tokenizer/vocab.json
ADDED

The diff for this file is too large to render; see the raw file.
    	
unet/config.json
ADDED

@@ -0,0 +1,67 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.17.1",
+  "_name_or_path": "D:\\.cache\\huggingface\\hub\\models--stabilityai--stable-diffusion-2\\snapshots\\1e128c8891e52218b74cde8f26dbfc701cb99d79\\unet",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "attention_head_dim": [
+    5,
+    10,
+    20,
+    20
+  ],
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 1024,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 96,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": true
+}
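The UNet matches the Stable Diffusion 2 backbone it was fine-tuned from: 4-channel latents in and out, with conditioning entering through 1024-dim cross-attention. A loading sketch (placeholder path):

from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("<repo-or-local-path>", subfolder="unet")
# 4 latent channels in/out; 1024-dim cross-attention, per the config above.
print(unet.config.in_channels, unet.config.out_channels, unet.config.cross_attention_dim)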
    	
unet/diffusion_pytorch_model.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5dce4ff236a33f9038605fa66b8d9366803ecfc1e896c6fbd9350d9f36c0f11
+size 1732113253
    	
vae/config.json
ADDED

@@ -0,0 +1,31 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.17.1",
+  "_name_or_path": "stabilityai/sd-vae-ft-mse",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 256,
+  "scaling_factor": 0.18215,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
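This is the sd-vae-ft-mse autoencoder: four encoder stages give an 8x spatial downsample into 4 latent channels, and latents are scaled by 0.18215 before the UNet sees them. A round-trip sketch (placeholder path, random stand-in input):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("<repo-or-local-path>", subfolder="vae")
image = torch.randn(1, 3, 512, 512)  # stand-in for a normalized RGB batch
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample() * vae.config.scaling_factor
    recon = vae.decode(latents / vae.config.scaling_factor).sample
print(latents.shape)  # torch.Size([1, 4, 64, 64]): 8x downsample, 4 channels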
    	
vae/diffusion_pytorch_model.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cfdd672df17db3283633acb3721afc7735927293c2d3bd2bf64939a6dcd950e
+size 167407857
    	
vision_encoder/config.json
ADDED

@@ -0,0 +1,23 @@
+{
+  "_name_or_path": "stabilityai/stable-diffusion-2-1-unclip",
+  "architectures": [
+    "CLIPVisionModelWithProjection"
+  ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "gelu",
+  "hidden_size": 1280,
+  "image_size": 224,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 32,
+  "patch_size": 14,
+  "projection_dim": 1024,
+  "torch_dtype": "float16",
+  "transformers_version": "4.29.0"
+}
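The vision encoder is CLIP ViT-H/14-sized (1280 hidden, 32 layers, patch size 14) and projects image features to 1024 dimensions, matching the UNet's cross_attention_dim. A sketch pairing it with the CLIP feature extractor above (placeholder path, hypothetical image file):

import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

model = CLIPVisionModelWithProjection.from_pretrained(
    "<repo-or-local-path>", subfolder="vision_encoder"
)
processor = CLIPImageProcessor.from_pretrained(
    "<repo-or-local-path>", subfolder="feature_extractor_clip"
)
pixels = processor(images=Image.open("cond.png"), return_tensors="pt").pixel_values
with torch.no_grad():
    out = model(pixels)
print(out.image_embeds.shape)  # torch.Size([1, 1024]), per projection_dim above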
    	
vision_encoder/pytorch_model.bin
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c626d61a7660d2f86a1f0b5f74f513f93789a99469f1af641cc1f77810427f7
+size 1264335601
