	update
main.py CHANGED
@@ -8,12 +8,15 @@ from funix.hint import HTML
 
 low_memory = True  # Set to True to run on mobile devices
 
+import os
+hf_token = os.environ.get("HF_TOKEN")
+
 ku_gpt_tokenizer = AutoTokenizer.from_pretrained("ku-nlp/gpt2-medium-japanese-char")
-chj_gpt_tokenizer = AutoTokenizer.from_pretrained("TURX/chj-gpt2")
-wakagpt_tokenizer = AutoTokenizer.from_pretrained("TURX/wakagpt")
+chj_gpt_tokenizer = AutoTokenizer.from_pretrained("TURX/chj-gpt2", token=hf_token)
+wakagpt_tokenizer = AutoTokenizer.from_pretrained("TURX/wakagpt", token=hf_token)
 ku_gpt_model = AutoModelForCausalLM.from_pretrained("ku-nlp/gpt2-medium-japanese-char")
-chj_gpt_model = AutoModelForCausalLM.from_pretrained("TURX/chj-gpt2")
-wakagpt_model = AutoModelForCausalLM.from_pretrained("TURX/wakagpt")
+chj_gpt_model = AutoModelForCausalLM.from_pretrained("TURX/chj-gpt2", token=hf_token)
+wakagpt_model = AutoModelForCausalLM.from_pretrained("TURX/wakagpt", token=hf_token)
 
 print("Models loaded successfully.")
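For context, the `token=hf_token` argument is what lets `from_pretrained` authenticate against private or gated Hub repositories; the public ku-nlp model loads without it. A minimal sketch of the pattern, assuming the Space defines an `HF_TOKEN` repository secret (the fail-fast guard is an illustrative addition, not part of this commit):

import os

from transformers import AutoTokenizer

# Read the Hub access token; on a Hugging Face Space this is typically
# configured as a repository secret named HF_TOKEN.
hf_token = os.environ.get("HF_TOKEN")

# Illustrative guard (not in the commit): fail fast with a clear message
# instead of hitting a 401 from the Hub later when the secret is missing.
if hf_token is None:
    raise RuntimeError("HF_TOKEN is not set; add it as a Space secret.")

# Gated/private repos need the token; public repos load without one.
chj_gpt_tokenizer = AutoTokenizer.from_pretrained("TURX/chj-gpt2", token=hf_token)

Note that if `HF_TOKEN` is unset, `os.environ.get` returns `None`, in which case `from_pretrained` falls back to any token cached locally by `huggingface-cli login`; on a Space no such cache exists, which is why a guard like the one above can be useful.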
