Merge remote-tracking branch 'origin/master' into italian
enumag committed Dec 2, 2023
2 parents b45ddd7 + 5adde5b commit 82d81ba
Showing 5 changed files with 149 additions and 22 deletions.
20 changes: 12 additions & 8 deletions README.md
@@ -83,11 +83,11 @@ which should show an error message and list available chapters

Then run

```python build.py onikakushi --translation```
```python build.py onikakushi```

Then the output files will be located in the `output/translation` folder. You can then merge the `HigurashiEp0X_Data` folder with the one in your release. **Please include all the files (not just the `sharedassets0.assets` file), so the installer can select the correct file at install time.**
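If you want to script the merge step, the sketch below is one way to do it with the Python standard library (the chapter number and release path are placeholders, not part of this repository); it copies the built `HigurashiEp0X_Data` folder over the one in your release while leaving any files that are not overwritten in place:

```python
# Minimal sketch: merge the built HigurashiEp0X_Data folder into an existing release.
# Both paths are placeholders - substitute your own chapter and release folder.
import shutil
from pathlib import Path

built_data = Path("output/translation/HigurashiEp04_Data")  # hypothetical build output
release_data = Path("my-release/HigurashiEp04_Data")        # hypothetical release folder

# dirs_exist_ok=True (Python 3.8+) merges into the existing folder instead of failing,
# overwriting files present in both and keeping everything else.
shutil.copytree(built_data, release_data, dirs_exist_ok=True)
```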

If you want to rebuild all chapters, run `python build.py all --translation` to build all chapters.
If you want to rebuild all chapters, run `python build.py all` to build all chapters.

### Common Problems

@@ -99,15 +99,11 @@ You may encounter the following problems:

**NOTE: The script should automatically detect if the vanilla assets or UABE has changed, and re-download them. But if that doesn't work, use the '--force-download' option like so:**

```python build.py rei --translation --force-download```
```python build.py rei --force-download```

## Instructions for Dev Team

For our dev team, the instructions are nearly the same, just remove the `--translation` argument.

```python build.py onikakushi```

Archive files will be automatically created in the `output` folder
Instructions are the same as for translators, but archive files will be automatically created in the `output` folder.

----

@@ -133,6 +129,8 @@ Click on the 'Actions' tab to observe the build process.

Once the build is complete, go to the 'Releases' page, and a new draft release should appear. You can check that everything is OK before publishing the release, or just download the files without publishing it.

Note that doing this will build both the `translation.7z` file for translators to use, and also the individual archives for the 07th-mod developers to use.

### Building `ui-compiler.exe` using Github Actions

To build just the `ui-compiler.exe` using Github Actions (on Github's server), push any tag to the repository.
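As a small illustration (the tag name below is arbitrary and purely hypothetical), creating and pushing such a tag could also be done from a Python helper script:

```python
# Minimal sketch: create and push a tag so Github Actions builds ui-compiler.exe.
# The tag name is a placeholder - per the note above, any tag should trigger the build.
import subprocess

tag = "ui-compiler-test-build"  # hypothetical tag name

subprocess.run(["git", "tag", tag], check=True)
subprocess.run(["git", "push", "origin", tag], check=True)
```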
@@ -145,6 +143,12 @@ The following information is only used when adding support for a new episode.

Please look through the detailed documentation, especially if you're working on a new chapter, new language, or using UABE - this file does not contain information on those topics.

### WARNING about Unix/MacOS `sharedassets0.assets`

We've found that the MacOS sharedassets can be used on Linux, **but the Linux sharedassets CANNOT be used on MacOS in certain cases**, giving you the "purple text" issue.

For this reason, whenever a new chapter is prepared, the 'vanilla' unix `sharedassets0.assets` should be taken from the MacOS version.

### Preparing font files

You'll need to extract the 'msgothic' font files from the stock `.assets` file before starting:
36 changes: 28 additions & 8 deletions build.py
@@ -113,11 +113,19 @@ def get_translation_sharedassets_name(self) -> str:
# 'matsuribayashi 5.6.7f1 unix'
],
"matsuribayashi": [
BuildVariant("M_GOG-M_MG-Steam", "matsuribayashi", "2017.2.5", "unix"),
# Based on the GOG MacOS sharedassets, but works on Linux too.
# Working on:
# - Linux Steam (2023-07-09)
# - Linux GOG (2023-07-09)
# - MacOS GOG (2023-07-09)
BuildVariant("GOG-MG-Steam", "matsuribayashi", "2017.2.5", "unix"),

# NOTE: I'm 99% certain this file is no longer used, as we just upgrade the entire GOG/Mangagamer game
# Special version for GOG/Mangagamer Linux with SHA256:
# A200EC2A85349BC03B59C8E2F106B99ED0CBAAA25FC50928BB8BA2E2AA90FCE9
# CRC32: 51100D6D
BuildVariant("L_GOG-L_MG", "matsuribayashi", "2017.2.5", "unix", "51100D6D"),
# BuildVariant("GOG-MG", "matsuribayashi", "2017.2.5", "unix", "51100D6D"), # TO BE REMOVED

BuildVariant("GOG-MG-Steam", "matsuribayashi", "2017.2.5", "win", translation_default=True),
],
'rei': [
@@ -240,6 +248,13 @@ def save(self):
with open(LastModifiedManager.savePath, 'w') as handle:
json.dump(self.lastModifiedDict, handle)

if sys.version_info < (3, 0):
    print(">>>> ERROR: This script does not work on Python 2")
    exit(-1)

if sys.version_info >= (3, 11):
    print(">>>> WARNING: This script probably does not work on Python 3.11 because unitypack uses an old version of decrunch which does not build. Use Python 3.10 or below if you have this error.")

lastModifiedManager = LastModifiedManager()

# Parse command line arguments
@@ -252,14 +267,11 @@ def save(self):
choices=["all", "github_actions"] + list(chapter_to_build_variants.keys()),
)
parser.add_argument("--force-download", default=False, action='store_true')
parser.add_argument("--translation", default=False, action='store_true')
parser.add_argument("--disable-translation", default=False, action='store_true')
args = parser.parse_args()

force_download = args.force_download

# NOTE: For now, translation archive output is always enabled, as most of the time this script will be used for translators
translation = args.translation

# Get chapter name from git tag if "github_actions" specified as the chapter
chapter_name = args.chapter
if chapter_name == "github_actions":
@@ -270,13 +282,21 @@ def save(self):
)
exit(0)

# NOTE: For now, translation archive output is always enabled, as most of the time this script will be used for translators
# NOTE: For now, translation archive output is enabled by default, as most of the time this script will be used for translators
translation = True

if args.disable_translation:
translation = False

# Get a list of build variants (like 'onikakushi 5.2.2f1 win') depending on command line arguments
build_variants = get_build_variants(chapter_name)
build_variants_list = "\n - ".join([b.get_build_command() for b in build_variants])
print(f"For chapter '{chapter_name}' building:\n - {build_variants_list}")
print(f"-------- Build Started --------")
print(f"Chapter: [{chapter_name}] | Translation Archive Output: [{('Enabled' if translation else 'Disabled')}]")
print(f"Variants:")
print(f" - {build_variants_list}")
print(f"-------------------------------")
print()

# Install python dependencies
print("Installing python dependencies")
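The matsuribayashi build variant comments above identify a specific Linux GOG/Mangagamer `sharedassets0.assets` by its SHA256 and CRC32 checksums. As a minimal sketch (the file path is a placeholder), both values can be computed with the Python standard library to check which variant a given file corresponds to:

```python
# Minimal sketch: compute the SHA256 and CRC32 of a sharedassets0.assets file
# so it can be compared against the checksums listed in build.py.
# The path is a placeholder - point it at the file you want to identify.
import hashlib
import zlib

path = "HigurashiEp08_Data/sharedassets0.assets"  # hypothetical path

sha256 = hashlib.sha256()
crc = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)
        crc = zlib.crc32(chunk, crc)

print("SHA256:", sha256.hexdigest().upper())
print(f"CRC32:  {crc & 0xFFFFFFFF:08X}")
```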
3 changes: 0 additions & 3 deletions compileall.sh
@@ -15,13 +15,10 @@ cargo run meakashi 5.5.3p1 win && \
cargo run meakashi 5.5.3p1 unix && \
cargo run tsumihoroboshi 5.5.3p3 win && \
cargo run tsumihoroboshi 5.5.3p3 unix && \
cargo run tsumihoroboshi 5.6.7f1 win && \
cargo run minagoroshi 5.6.7f1 win && \
cargo run minagoroshi 5.6.7f1 unix && \
cargo run matsuribayashi 5.6.7f1 win && \
cargo run matsuribayashi 5.6.7f1 unix && \
cargo run matsuribayashi 2017.2.5 unix && \
cargo run matsuribayashi 2017.2.5 win && \
cargo run rei 2019.4.3 win && \
cargo run rei 2019.4.4 win && \
cargo run rei 2019.4.3 unix && \
100 changes: 100 additions & 0 deletions scripts/Manually Run Scripts/MergeMultipleTextEdits.py
@@ -0,0 +1,100 @@
# Use this script to merge multiple text-edits.json files together
# It expects all .json files to be in a folder called 'text-edits' next to this script
# It outputs to a file called merged-translations.json
# Files will be processed in alphabetical order, so it is recommended to prefix each filename with the chapter number
# If there are any conflicts, an exception will be raised.
#
# This script is usually not needed, unless translators have been swapping out the text-edits.json for each chapter
# rather than maintaining a common text-edits.json for all chapters.

import os
import json


class Fragment:
order = 0

def __init__(self, fragment_as_dictionary: dict[str, str]):
self.current_english = fragment_as_dictionary['CurrentEnglish']
self.current_japanese = fragment_as_dictionary['CurrentJapanese']
self.new_english = fragment_as_dictionary['NewEnglish']
self.new_japanese = fragment_as_dictionary['NewJapanese']
self.discriminator = fragment_as_dictionary.get('Discriminator')
self.order = Fragment.order
Fragment.order += 1

# Generate a key for this Fragment, used later to check for conflicting fragments
self.key = self.current_english + self.current_japanese
if self.discriminator is not None:
self.key += str(self.discriminator)


def equals(self, other: 'Fragment'):
return (
self.current_english == other.current_english and
self.current_japanese == other.current_japanese and
self.new_english == other.new_english and
self.new_japanese == other.new_japanese and
self.discriminator == other.discriminator
)

def __repr__(self) -> str:
return f"{self.order} ce: {self.current_english} cj: {self.current_japanese} ne: {self.new_english} nj: {self.new_japanese} d: {self.discriminator}"

def as_dict(self):
retval = {
'CurrentEnglish': self.current_english,
'CurrentJapanese': self.current_japanese,
'NewEnglish': self.new_english,
'NewJapanese': self.new_japanese,
}

if self.discriminator is not None:
retval['Discriminator'] = self.discriminator

return retval

def merge(all_translations: dict[str, Fragment], fragment: Fragment):

if fragment.key not in all_translations:
all_translations[fragment.key] = fragment
else:
existing_item = all_translations[fragment.key]

if existing_item.equals(fragment):
print(f"Skipping duplicate item {fragment}")
else:
raise Exception(f"Warning: non duplicate item existing:{existing_item} new: {fragment}")



in_folder = "text-edits"

files = sorted(os.listdir(in_folder))  # sort so the files are processed in alphabetical order

all_fragments = [] # type: list[Fragment]

for filename in files:
path = os.path.join(in_folder, filename)
print(f"Parsing {path}")

with open(path, encoding='utf-8') as f:
chapter_list_dict = json.loads(f.read())

all_fragments.extend(Fragment(f) for f in chapter_list_dict)

all_translations = {}

# Merge all fragments into one dict, ignoring duplicates
for f in all_fragments:
print(f.current_english)
merge(all_translations, f)
print()

# Convert to list and sort by 'order' which is the order the fragments were loaded
sorted_translations = list(sorted(all_translations.values(), key=lambda f: f.order))
for item in sorted_translations:
print(item)

with open("merged-translations.json", 'w', encoding='utf-8') as out:
out.write(json.dumps([f.as_dict() for f in sorted_translations], indent=4, ensure_ascii=False))
12 changes: 9 additions & 3 deletions scripts/UnityTextModifier.py
@@ -72,14 +72,14 @@ def findInAssetBundle(self, bundle):
offsets.append(offset)
start = offset + 1
if len(offsets) == 0:
raise IndexError(f"No asset found for {self.shortString}")
raise IndexError(f"WARNING: No asset found for {self.shortString}")
if self.discriminator == None:
if len(offsets) > 1:
raise IndexError(f"Multiple assets found for {self.shortString}, candidates are " + ", ".join(f"{index}: 0x{offset:x}" for index, offset in enumerate(offsets)) + ". Please add a field like 'Discriminator: 0' to indicate which block should apply to which asset (do NOT use quotes around the number, do not use the raw address). For an example, see https://github.com/07th-mod/higurashi-dev-guides/wiki/UI-editing-scripts#unitytextmodifier")
raise IndexError(f"WARNING: Multiple assets found for {self.shortString}, candidates are " + ", ".join(f"{index}: 0x{offset:x}" for index, offset in enumerate(offsets)) + ". Please add a field like 'Discriminator: 0' to indicate which block should apply to which asset (do NOT use quotes around the number, do not use the raw address). For an example, see https://github.com/07th-mod/higurashi-dev-guides/wiki/UI-editing-scripts#unitytextmodifier")
self.offset = offsets[0]
else:
if len(offsets) <= self.discriminator:
raise IndexError(f"Not enough offsets found for ${self.trimmedE} / {self.trimmedJ} to meet request for #{self.discriminator}, there were only {len(offsets)}")
raise IndexError(f"WARNING: Not enough offsets found for {self.shortString} to meet request for #{self.discriminator}, there were only {len(offsets)}")
self.offset = offsets[self.discriminator]

def checkObject(self, id, object, bundle):
@@ -113,6 +113,7 @@ def __str__(self):
try: return f"<ScriptEdit for position 0x{self.offset:x}>"
except: return "<ScriptEdit for unknown position>"

warning_count = 0

with open(sys.argv[2], encoding="utf-8") as jsonFile:
edits = [ScriptEdit.fromJSON(x) for x in json.load(jsonFile)]
@@ -127,6 +128,8 @@ def __str__(self):
print(f"Found {edit.shortString} at offset 0x{edit.offset:x}")
except IndexError as e:
print(e)
warning_count += 1

edits = newEdits

assetsFile.seek(0)
@@ -136,3 +139,6 @@ def __str__(self):
edit.checkObject(id, obj, bundle)
for edit in edits:
edit.write(sys.argv[3])

if warning_count > 0:
print(">>>>>>>> ONE OR MORE WARNINGS OCCURRED, please check logs! <<<<<<<<<")
