futranbg committed on
Commit
e761cd8
1 Parent(s): 4517722

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -54,11 +54,11 @@ def split_text_into_chunks(text, chunk_size=1000):
54
  for line in lines:
55
  # If adding the current line doesn't exceed the chunk size, add the line to the chunk
56
  if len(chunk) + len(line) <= chunk_size:
57
- chunk += line + '\n'
58
  else:
59
  # If adding the line exceeds chunk size, store the current chunk and start a new one
60
  chunks.append(chunk)
61
- chunk = line + '\n'
62
  # Don't forget the last chunk
63
  chunks.append(chunk)
64
  return chunks
@@ -74,10 +74,10 @@ def translation(source, target, text):
74
  stchunk = llm3(input_prompt)
75
  for eot in bloom_model_kwargs['stop']:
76
  stchunk = stchunk.replace(eot,"")
77
- response += stchunk + "\n"
78
  except Exception as e:
79
  print(f"ERROR: LLM show {e}")
80
  if response == "": response = text
81
- return response.strip()
82
 
83
  gr.Interface(translation, inputs=["text","text","text"], outputs="text").launch()
 
54
  for line in lines:
55
  # If adding the current line doesn't exceed the chunk size, add the line to the chunk
56
  if len(chunk) + len(line) <= chunk_size:
57
+ chunk += line + "<newline>"
58
  else:
59
  # If adding the line exceeds chunk size, store the current chunk and start a new one
60
  chunks.append(chunk)
61
+ chunk = line + "<newline>"
62
  # Don't forget the last chunk
63
  chunks.append(chunk)
64
  return chunks
 
74
  stchunk = llm3(input_prompt)
75
  for eot in bloom_model_kwargs['stop']:
76
  stchunk = stchunk.replace(eot,"")
77
+ response += stchunk
78
  except Exception as e:
79
  print(f"ERROR: LLM show {e}")
80
  if response == "": response = text
81
+ return response.replace("<newline>","\n")
82
 
83
  gr.Interface(translation, inputs=["text","text","text"], outputs="text").launch()