shri-ads committed on
Commit
d7d19a0
·
verified ·
1 Parent(s): 7143590

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. .gitattributes +2 -33
  2. README.md +5 -2
  3. generation_config.json +7 -0
  4. tokenizer_config.json +4 -1
.gitattributes CHANGED
@@ -1,36 +1,5 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
 
1
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
 
2
  *.bin filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  *.pt filter=lfs diff=lfs merge=lfs -text
4
+ *.gguf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
5
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,8 @@
1
  ---
2
  language:
3
  - en
 
 
4
  pipeline_tag: text-generation
5
  tags:
6
  - phi4
@@ -29,12 +31,13 @@ base model.
29
  from transformers import AutoModelForCausalLM, AutoTokenizer
30
 
31
  model = AutoModelForCausalLM.from_pretrained(
32
- "shri-ads/phi4-guardrail",
33
  trust_remote_code=True,
34
  token="your_hf_token",
35
  )
36
  tokenizer = AutoTokenizer.from_pretrained(
37
- "microsoft/Phi-4-mini-instruct",
 
38
  token="your_hf_token",
39
  )
40
 
 
1
  ---
2
  language:
3
  - en
4
+ library_name: transformers
5
+ base_model: microsoft/Phi-4-mini-instruct
6
  pipeline_tag: text-generation
7
  tags:
8
  - phi4
 
31
  from transformers import AutoModelForCausalLM, AutoTokenizer
32
 
33
  model = AutoModelForCausalLM.from_pretrained(
34
+ "your-username/phi4-guardrail",
35
  trust_remote_code=True,
36
  token="your_hf_token",
37
  )
38
  tokenizer = AutoTokenizer.from_pretrained(
39
+ "your-username/phi4-guardrail",
40
+ trust_remote_code=True,
41
  token="your_hf_token",
42
  )
43
 
generation_config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "transformers_version": "4.44.0",
4
+ "eos_token_id": 199999,
5
+ "pad_token_id": 199999,
6
+ "bos_token_id": 199999
7
+ }
tokenizer_config.json CHANGED
@@ -102,10 +102,13 @@
102
  },
103
  "bos_token": "<|endoftext|>",
104
  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
 
 
 
105
  "clean_up_tokenization_spaces": false,
106
  "eos_token": "<|endoftext|>",
107
  "model_max_length": 131072,
108
  "pad_token": "<|endoftext|>",
109
- "tokenizer_class": "GPT2Tokenizer",
110
  "unk_token": "<|endoftext|>"
111
  }
 
102
  },
103
  "bos_token": "<|endoftext|>",
104
  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
105
+ "auto_map": {
106
+ "AutoTokenizer": ["tokenization_sentinel.SentinelTokenizer", null]
107
+ },
108
  "clean_up_tokenization_spaces": false,
109
  "eos_token": "<|endoftext|>",
110
  "model_max_length": 131072,
111
  "pad_token": "<|endoftext|>",
112
+ "tokenizer_class": "SentinelTokenizer",
113
  "unk_token": "<|endoftext|>"
114
  }