Upload Gemma2ForCausalLM
- README.md +3 -4
- config.json +49 -0
- generation_config.json +8 -0
- model-00001-of-00003.safetensors +3 -0
- model-00002-of-00003.safetensors +3 -0
- model-00003-of-00003.safetensors +3 -0
- model.safetensors.index.json +0 -0
    	
README.md CHANGED

@@ -1,11 +1,10 @@
 ---
-license: gemma
 library_name: transformers
+license: gemma
 pipeline_tag: text-generation
 extra_gated_heading: Access Gemma on Hugging Face
-extra_gated_prompt: >-
-  To access Gemma on Hugging Face, you’re required to review and agree to
-  Google’s usage license. To do this, please ensure you’re logged in to Hugging
+extra_gated_prompt: To access Gemma on Hugging Face, you’re required to review and
+  agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging
   Face and click below. Requests are processed immediately.
 ---

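Since the card keeps the repo gated behind Google’s usage license, downloading these files requires an authenticated Hugging Face account that has accepted the terms. A minimal sketch with huggingface_hub; the repo id below is a placeholder assumption, not taken from this commit:

# Sketch: authenticate and pull the gated snapshot (placeholder repo id).
from huggingface_hub import login, snapshot_download

REPO_ID = "your-namespace/gemma-2-27b-it-bnb-4bit"  # hypothetical repo id

login(token="hf_...")                  # or run `huggingface-cli login` once
local_dir = snapshot_download(REPO_ID)
print("files downloaded to", local_dir)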
    	
config.json ADDED

@@ -0,0 +1,49 @@
+{
+  "_name_or_path": "google/gemma-2-27b-it",
+  "architectures": [
+    "Gemma2ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": 50.0,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "final_logit_softcapping": 30.0,
+  "head_dim": 128,
+  "hidden_act": "gelu_pytorch_tanh",
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 4608,
+  "initializer_range": 0.02,
+  "intermediate_size": 36864,
+  "max_position_embeddings": 8192,
+  "model_type": "gemma2",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 46,
+  "num_key_value_heads": 16,
+  "pad_token_id": 0,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
+  "query_pre_attn_scalar": 144,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "sliding_window_size": 4096,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.42.3",
+  "use_cache": true,
+  "vocab_size": 256000
+}
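Because the quantization_config block is stored inside config.json, the checkpoint ships as pre-quantized bitsandbytes NF4 weights and transformers restores it in 4-bit directly, with no separate BitsAndBytesConfig needed at load time. A rough loading sketch, assuming a CUDA device, the bitsandbytes package, and the same placeholder repo id:

# Sketch: load the pre-quantized model; the NF4 settings come from
# quantization_config in config.json (assumes bitsandbytes + CUDA).
import torch
from transformers import AutoModelForCausalLM

REPO_ID = "your-namespace/gemma-2-27b-it-bnb-4bit"  # hypothetical repo id

model = AutoModelForCausalLM.from_pretrained(
    REPO_ID,
    torch_dtype=torch.bfloat16,  # matches torch_dtype / bnb_4bit_compute_dtype
    device_map="auto",
)
print(model.config.quantization_config)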
    	
generation_config.json ADDED

@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.42.3"
+}
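generation_config.json only pins the special-token ids and the hybrid cache used by Gemma 2; from_pretrained picks it up automatically, but it can also be inspected on its own. A short sketch with the same placeholder repo id:

# Sketch: inspect the shipped generation defaults (placeholder repo id).
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("your-namespace/gemma-2-27b-it-bnb-4bit")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 2 1 0
# These values are the defaults for model.generate(...) unless overridden.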
    	
model-00001-of-00003.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68f61613c0538726a289b1dfd559b5c8fe2143f1759818411e70c28a4f72f24e
+size 5981974007
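Each shard is committed as a Git LFS pointer: a three-line stub carrying the spec version, the SHA-256 of the actual payload, and its size in bytes. A stdlib-only sketch for checking a downloaded shard against the first pointer above:

# Sketch: verify a downloaded shard against its LFS pointer (oid + size).
import hashlib
from pathlib import Path

shard = Path("model-00001-of-00003.safetensors")
expected_oid = "68f61613c0538726a289b1dfd559b5c8fe2143f1759818411e70c28a4f72f24e"
expected_size = 5981974007

digest = hashlib.sha256()
with shard.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert shard.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")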
    	
model-00002-of-00003.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03e27998fa9fb6ba94e9b7f1eccf088acd6071bbe97a3c47d5fc287946795fa9
+size 5930678517
    	
model-00003-of-00003.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41d77632277218a47c8ed9df1ad1fef2d4759e3c907e261ea02724e0000fc82f
+size 3885658823
    	
model.safetensors.index.json ADDED

The diff for this file is too large to render. See the raw diff.
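model.safetensors.index.json follows the standard sharded-safetensors layout: a metadata.total_size field plus a weight_map that maps every parameter name to the shard file holding it. A small sketch that summarizes the index once it has been downloaded locally:

# Sketch: summarize the shard index (standard sharded-safetensors format).
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print("total size (bytes):", index["metadata"]["total_size"])
for shard, n_tensors in sorted(Counter(index["weight_map"].values()).items()):
    print(f"{shard}: {n_tensors} tensors")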