krisliu committed
Commit deb8d0e · 1 Parent(s): 682b34c

add effiLLaMA

Files changed (13)
  1. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/consolidated.00-of-01.model.pth +3 -0
  2. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/consolidated.00-of-01.optimizer.pth +3 -0
  3. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/consolidated.00-of-01.other.pth +3 -0
  4. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00000-of-00008.pth +3 -0
  5. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00001-of-00008.pth +3 -0
  6. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00002-of-00008.pth +3 -0
  7. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00003-of-00008.pth +3 -0
  8. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00004-of-00008.pth +3 -0
  9. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00005-of-00008.pth +3 -0
  10. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00006-of-00008.pth +3 -0
  11. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00007-of-00008.pth +3 -0
  12. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/log.txt +4 -0
  13. finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/output.log +0 -0
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/consolidated.00-of-01.model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fb3f6940d0daec67c60168040ac04802c5e5f944773b318b9044852e0343bdb
+ size 14739226267
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/consolidated.00-of-01.optimizer.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee9d314122e000a820f3a68577d281120b010c2f88ae2e81aa1e00789a2496d9
+ size 5050907275
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/consolidated.00-of-01.other.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1c845855c5a26f43868bfccd4e1928ed806ed04351061e2d222f95562057d18
+ size 1751
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00000-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec2932635da1a4de71c34aa8fcbcba91dfb0ac1ddc7859f8f87280546b7e786a
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00001-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88973b3c418b507bcde1467ec3902218b83d95fe4e022aca11b09c3f86cde7ac
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00002-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eee15a274ea5f27c0360c85bd878d6e0f2072076cae26311c52798f7d836643a
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00003-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61651d612914693bf494e5609388a6f9239090c45b3abcc9c4fa5c7a814c7a7e
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00004-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cd6ad8f3d2bcfa25c957717227143e64751970f9b367b28b205a5084a8f476a
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00005-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf049e1944a87da00e6860d1884d0eb312dc5a389a832a4e76a582493ec26972
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00006-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8174e84cf8a0553f73baf42bd13d65974b85944a834fa7f75433c0be044e2f04
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/epoch3/rank-specific-00007-of-00008.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb6f9198ace60febfc0ad5d85588a3d4021799762f521c1a6b87adc99c8889ce
+ size 537
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/log.txt ADDED
@@ -0,0 +1,4 @@
+ {"train_lr": 4.993842364532024e-05, "train_closs": 0.9764234678855044, "train_grad_norm": 1.8961609010332323, "epoch": 0, "val_lr": 4.993842364532024e-05, "val_closs": 0.9764234678855044, "val_grad_norm": 1.8961609010332323}
+ {"train_lr": 9.179680273851965e-05, "train_closs": 0.8317723872374888, "train_grad_norm": 1.3341844848958142, "epoch": 1, "val_lr": 9.179680273851965e-05, "val_closs": 0.8317723872374888, "val_grad_norm": 1.3341844848958142}
+ {"train_lr": 5.252924876847267e-05, "train_closs": 0.5327783184412362, "train_grad_norm": 1.308901717069701, "epoch": 2, "val_lr": 5.252924876847267e-05, "val_closs": 0.5327783184412362, "val_grad_norm": 1.308901717069701}
+ {"train_lr": 1.3232446029953387e-05, "train_closs": 0.28212447010453284, "train_grad_norm": 1.1925034630915214, "epoch": 3, "val_lr": 1.3232446029953387e-05, "val_closs": 0.28212447010453284, "val_grad_norm": 1.1925034630915214}
finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/output.log ADDED
The diff for this file is too large to render.
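Note on log.txt: each line of the file added above is a standalone JSON object holding the per-epoch metrics (train_lr, train_closs, train_grad_norm, plus mirrored val_* fields). A minimal sketch for loading it, assuming the newline-delimited JSON layout shown in the diff and the path added in this commit:

import json

# Path assumed from the file list in this commit.
LOG_PATH = "finetune/sg/effiLLaMA/alpaca_llamaPeft_normBiasLora_r512_qkvo_learninit_7B/log.txt"

def read_log(path=LOG_PATH):
    # One JSON object per non-empty line; one record per epoch.
    records = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                records.append(json.loads(line))
    return records

if __name__ == "__main__":
    for record in read_log():
        print(record["epoch"], record["train_closs"], record["train_grad_norm"])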