# Quick check of Janome's POS tagging on a short Japanese sentence, plus a
# sanity check of the noun-extraction logic.
from janome.tokenizer import Tokenizer

t = Tokenizer()  # uses Janome's bundled IPADIC dictionary by default
text = "マタイについて話したい。"  # "I want to talk about Matthew (マタイ)."

print(f"Analyzing: {text}")
for token in t.tokenize(text):
    # part_of_speech is a comma-separated string; its first field is the
    # coarse category (e.g. 名詞 = noun, 助詞 = particle).
    print(f"Surface: {token.surface}, POS: {token.part_of_speech}")

print("\n--- Current Logic Check ---")
tokens = []
for token in t.tokenize(text):
    pos = token.part_of_speech.split(',')[0]
    if pos == '名詞':  # keep only nouns (名詞)
        tokens.append(token.surface)
print(f"Extracted Nouns: {tokens}")
