-
Notifications
You must be signed in to change notification settings - Fork 23
Expand file tree
/
Copy pathlangdrive.yaml
More file actions
51 lines (46 loc) · 1.26 KB
/
langdrive.yaml
File metadata and controls
51 lines (46 loc) · 1.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
---
# LangDrive configuration.
# Secrets are referenced either as local files under secrets/ or via
# "env:<VAR>" indirection (resolved by the consumer at load time —
# NOTE(review): inferred from the env: prefix convention; confirm).

verbose: true

# Firebase / Firestore connection.
firestore:
  clientJson: "secrets/firebase_service_client.json"
  databaseURL: "env:FIREBASE_DATABASE_URL"

# Google Drive OAuth client (desktop-app flow).
gdrive:
  appType: "desktop"
  keyFilePath: "secrets/google_desktop_client.json"
  tokenFilePath: "secrets/google_desktop_token.json"
  # NOTE(review): looks like a leftover developer note about the
  # getFileByName signature, not real config — confirm before removing.
  temp: 'getFileByName - filename, mimeType, directory, directoryId'
  scopes:
    - "https://www.googleapis.com/auth/drive"
    - "https://www.googleapis.com/auth/drive.metadata.readonly"

# Gmail credentials (both resolved from environment variables).
email:
  password: "env:GMAIL_PASSWORD"
  email: "env:GMAIL_EMAIL"

# Hugging Face Hub: base model to fine-tune and where to push results.
huggingface:
  hfToken: "env:HUGGINGFACE_API_KEY"
  baseModel: vilsonrodrigues/falcon-7b-instruct-sharded
  hfModelPath: karpathic/falcon-7b-instruct-tuned
  deployToHf: true
  hfTrainPath: karpathic/auto-trainer
  isPrivate: true

# Training job: input/output columns both read from the same CSV.
train:
  input:
    path: ./tests/midjourney_prompt.csv
    value: input
  output:
    path: ./tests/midjourney_prompt.csv
    outputValue: output
  # Trainer hyperparameters (UPPER_SNAKE keys as expected by the trainer).
  settings:
    PROJECT_NAME: my_test_llm
    MODEL_NAME: abhishek/llama-2-7b-hf-small-shards
    PUSH_TO_HUB: false
    # NOTE(review): "2e-4" is a string under YAML 1.1 loaders (PyYAML)
    # but a float under YAML 1.2 — confirm the consumer accepts both,
    # or write 2.0e-4 to force a float everywhere.
    LEARNING_RATE: 2e-4
    NUM_EPOCHS: 1
    BATCH_SIZE: 1
    BLOCK_SIZE: 1024
    WARMUP_RATIO: 0.1
    WEIGHT_DECAY: 0.01
    GRADIENT_ACCUMULATION: 4
    MIXED_PRECISION: fp16
    PEFT: true
    QUANTIZATION: int4
    LORA_R: 16
    LORA_ALPHA: 32
    LORA_DROPOUT: 0.05