mirror of
https://github.com/microsoft/BitNet
synced 2025-04-29 15:47:22 +08:00
fix readme issue and -cnv option issue
commit 0a446952e1 (parent aa39c0cdcc)
README.md
@@ -159,7 +159,7 @@ optional arguments:
 ### Basic usage
 ```bash
 # Run inference with the quantized model
-python run_inference.py -m models/Falcon3-7B-Instruct-1.58bit/ggml-model-i2_s.gguf -cnv "You are a helpful assistant"
+python run_inference.py -m models/Falcon3-7B-Instruct-1.58bit/ggml-model-i2_s.gguf -p "You are a helpful assistant" -cnv
 
 # Output:
 # Daniel went back to the the the garden. Mary travelled to the kitchen. Sandra journeyed to the kitchen. Sandra went to the hallway. John went to the bedroom. Mary went back to the garden. Where is Mary?
@@ -167,7 +167,7 @@ python run_inference.py -m models/Falcon3-7B-Instruct-1.58bit/ggml-model-i2_s.gg
 
 ```
 <pre>
-usage: run_inference.py [-h] [-m MODEL] [-n N_PREDICT] -p PROMPT [-t THREADS] [-c CTX_SIZE] [-temp TEMPERATURE]
+usage: run_inference.py [-h] [-m MODEL] [-n N_PREDICT] -p PROMPT [-t THREADS] [-c CTX_SIZE] [-temp TEMPERATURE] [-cnv]
 
 Run inference
 
@@ -185,6 +185,7 @@ optional arguments:
                         Size of the prompt context
   -temp TEMPERATURE, --temperature TEMPERATURE
                         Temperature, a hyperparameter that controls the randomness of the generated text
+  -cnv, --conversation  Whether to enable chat mode or not (for instruct models.)
 </pre>
 
 ### Benchmark
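The README hunks above all follow from one fact: `-cnv` is a bare flag, so the prompt has to travel through `-p` while `-cnv` only toggles chat mode. Below is a minimal sketch of the CLI implied by the usage string, assuming a standard argparse setup with a `store_true` action (an assumption, not verified against the actual run_inference.py):

```python
import argparse

# Sketch of the CLI implied by the usage string in the diff above;
# the store_true action and help texts are assumptions, not repo code.
parser = argparse.ArgumentParser(description="Run inference")
parser.add_argument("-m", "--model", type=str, help="Path to the GGUF model file")
parser.add_argument("-p", "--prompt", type=str, required=True, help="Prompt to generate text from")
parser.add_argument("-cnv", "--conversation", action="store_true",
                    help="Whether to enable chat mode or not (for instruct models.)")
args = parser.parse_args()

# The old README command, `-cnv "You are a helpful assistant"`, fails under
# this parsing: store_true consumes no value, so the quoted prompt is left
# over as an unrecognized argument and -p is reported as missing.
```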
run_inference.py
@@ -31,8 +31,9 @@ def run_inference():
         '-c', str(args.ctx_size),
         '--temp', str(args.temperature),
         "-b", "1",
-        "-cnv" if args.conversation else ""
     ]
+    if args.conversation:
+        command.append("-cnv")
     run_command(command)
 
 def signal_handler(sig, frame):
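The run_inference.py hunk fixes a quieter bug than the diff suggests: with the old ternary, disabling chat mode left a literal `""` in the argument list, and `subprocess` hands that to the child as a real, empty argv entry. A self-contained demonstration of the difference, using `printf` as a stand-in child process on a POSIX system (not the project's actual binary):

```python
import subprocess

# Old pattern: with the flag off, the ternary leaves "" in the list and
# the child receives an extra, empty argv entry ("a||" below).
buggy = ["printf", "%s|", "a", "-cnv" if False else ""]
print(subprocess.run(buggy, capture_output=True, text=True).stdout)    # a||

# Committed fix: build the base command, append the flag only when set.
command = ["printf", "%s|", "a"]
conversation = False
if conversation:
    command.append("-cnv")
print(subprocess.run(command, capture_output=True, text=True).stdout)  # a|
```

An empty argv entry is easy to miss in logs but can be rejected by the underlying binary as an unknown argument; note that it was the runs without `-cnv` that picked up the stray empty string.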
setup_env.py
@@ -27,19 +27,19 @@ SUPPORTED_HF_MODELS = {
         "model_name": "Falcon3-7B-1.58bit",
     },
     "tiiuae/Falcon3-10B-Instruct-1.58bit": {
-        "model_name": "Falcon3-10B-1.58bit",
+        "model_name": "Falcon3-10B-Instruct-1.58bit",
     },
     "tiiuae/Falcon3-10B-1.58bit": {
         "model_name": "Falcon3-10B-1.58bit",
     },
     "tiiuae/Falcon3-3B-Instruct-1.58bit": {
-        "model_name": "Falcon3-3B-1.58bit",
+        "model_name": "Falcon3-3B-Instruct-1.58bit",
     },
     "tiiuae/Falcon3-3B-1.58bit": {
         "model_name": "Falcon3-3B-1.58bit",
     },
     "tiiuae/Falcon3-1B-Instruct-1.58bit": {
-        "model_name": "Falcon3-1B-1.58bit",
+        "model_name": "Falcon3-1B-Instruct-1.58bit",
     },
 }
 
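The SUPPORTED_HF_MODELS correction matters because, judging by the README paths (models/Falcon3-7B-Instruct-1.58bit/...), model_name appears to determine the local directory a model resolves to, so an Instruct repo sharing a name with its base model would land in the same folder. A standalone self-check that would catch this class of bug, run over an excerpt of the corrected table (a sketch, not code from the repository):

```python
# Excerpt of the corrected mapping; before this commit the Instruct
# entries reused the base model's name and collided with it.
SUPPORTED_HF_MODELS = {
    "tiiuae/Falcon3-10B-Instruct-1.58bit": {"model_name": "Falcon3-10B-Instruct-1.58bit"},
    "tiiuae/Falcon3-10B-1.58bit": {"model_name": "Falcon3-10B-1.58bit"},
    "tiiuae/Falcon3-3B-Instruct-1.58bit": {"model_name": "Falcon3-3B-Instruct-1.58bit"},
    "tiiuae/Falcon3-3B-1.58bit": {"model_name": "Falcon3-3B-1.58bit"},
}

names = [m["model_name"] for m in SUPPORTED_HF_MODELS.values()]
assert len(names) == len(set(names)), "two HF repos map to the same local model name"
```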
@@ -141,7 +141,7 @@ def setup_gguf():
 def gen_code():
     _, arch = system_info()
 
-    llama3_f3_models = ["Llama3-8B-1.58-100B-tokens", "Falcon3-7B-1.58bit", "Falcon3-10B-1.58bit", "Falcon3-3B-1.58bit", "Falcon3-1B-1.58bit", "Falcon3-1B-Instruct-1.58bit", "Falcon3-3B-Instruct-1.58bit", "Falcon3-7B-Instruct-1.58bit", "Falcon3-10B-Instruct-1.58bit"]
+    llama3_f3_models = set([model['model_name'] for model in SUPPORTED_HF_MODELS.values() if model['model_name'].startswith("Falcon3") or model['model_name'].startswith("Llama")])
 
     if arch == "arm64":
         if args.use_pretuned:
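The final hunk replaces a hard-coded list of model names with one derived from SUPPORTED_HF_MODELS, so gen_code() automatically tracks models added to the table instead of requiring a second edit. The committed line can also be written slightly more idiomatically with a set comprehension and the tuple form of str.startswith (an equivalent style variant, not part of the commit):

```python
# Equivalent to the committed line: str.startswith accepts a tuple of
# prefixes, and a set comprehension avoids the intermediate list.
llama3_f3_models = {
    model["model_name"]
    for model in SUPPORTED_HF_MODELS.values()
    if model["model_name"].startswith(("Falcon3", "Llama"))
}
```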