Ayushnangia committed
Commit 9765e4d
1 Parent(s): dba2762
Files changed (3)
  1. Lossfunk-Residency-Llama-3-8B-Instruct +1 -0
  2. app.py +1 -12
  3. fun.py +4 -0
Lossfunk-Residency-Llama-3-8B-Instruct ADDED
@@ -0,0 +1 @@
+Subproject commit af0d94ff4006cb9e9c475a6ed7e3c2f86b8fe370
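The added path is a git submodule pinned to the commit above, so app.py can rely on the adapter files already being checked out locally. A minimal sketch of how that checkout could be materialized before app.py runs, reusing the subprocess pattern the removed code used (the submodule path comes from this commit; the rest is an assumption about the build environment):

import subprocess

# Hypothetical one-time setup: fetch the submodule that app.py expects at ./Lossfunk-Residency-Llama-3-8B-Instruct
subprocess.run(["git", "submodule", "update", "--init", "--recursive"], check=True)
# Pull any LFS-tracked weight files inside the submodule checkout
subprocess.run(["git", "lfs", "pull"], cwd="./Lossfunk-Residency-Llama-3-8B-Instruct", check=True)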
app.py CHANGED
@@ -3,7 +3,6 @@ import os
 import subprocess
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
-
 from huggingface_hub import login
 login(token=HF_TOKEN)
 
@@ -52,18 +51,8 @@ if torch.cuda.is_available():
     model = AutoModelForCausalLM.from_pretrained(
         model_id, device_map="cuda", torch_dtype=torch.bfloat16
     )
-    # Define local repository path for the ReFT model files
-    repo_local_path = "./Lossfunk-Residency-Llama-3-8B-Instruct"
-    if not os.path.exists(repo_local_path):
-        print("Local repository not found. Cloning repository using git lfs...")
-        subprocess.run(
-            ["git", "lfs", "clone", "https://huggingface.co/Ayushnangia/Lossfunk-Residency-Llama-3-8B-Instruct", repo_local_path],
-            check=True
-        )
-        # Ensure all LFS files are pulled
-        subprocess.run(["git", "lfs", "pull"], cwd=repo_local_path, check=True)
 
-
+    repo_local_path="./Lossfunk-Residency-Llama-3-8B-Instruct"
     # Load the ReFT model from the local repository
     reft_model = ReftModel.load(repo_local_path, model, from_huggingface_hub=False)
     reft_model.set_device("cuda")
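With the clone logic removed, app.py simply points repo_local_path at the submodule and loads the ReFT adapter from disk. A self-contained sketch of the resulting load path, assuming ReftModel comes from pyreft and that the base model is meta-llama/Meta-Llama-3-8B-Instruct (the diff shows neither the import nor model_id, so both are assumptions):

import os
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM
from pyreft import ReftModel  # assumption: ReftModel is imported from pyreft

login(token=os.environ.get("HF_TOKEN"))

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumption: base model behind the ReFT adapter
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="cuda", torch_dtype=torch.bfloat16
)

# The submodule checkout replaces the old git-lfs clone step
repo_local_path = "./Lossfunk-Residency-Llama-3-8B-Instruct"
reft_model = ReftModel.load(repo_local_path, model, from_huggingface_hub=False)
reft_model.set_device("cuda")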
fun.py ADDED
@@ -0,0 +1,4 @@
+import torch
+print("CUDA available:", torch.cuda.is_available())
+if torch.cuda.is_available():
+    print("GPU name:", torch.cuda.get_device_name(0))
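fun.py is a standalone sanity check: running python fun.py prints whether CUDA is visible and, if so, the name of GPU 0, which is a quick way to verify the runtime before the bfloat16 model load in app.py.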