15 changes: 12 additions & 3 deletions src/Auto3D/ASE/geometry.py
@@ -16,7 +16,8 @@
torch.backends.cudnn.allow_tf32 = False


-def opt_geometry(path: str, model_name:str, gpu_idx=0, opt_tol=0.003, opt_steps=5000):
+def opt_geometry(path: str, model_name:str, gpu_idx=0, opt_tol=0.003, opt_steps=5000,
+                 patience=None, batchsize_atoms=4096):
"""
Geometry optimization interface with FIRE optimizer.

@@ -30,6 +31,10 @@ def opt_geometry(path: str, model_name:str, gpu_idx=0, opt_tol=0.003, opt_steps=
:type opt_tol: float, optional
:param opt_steps: Maximum geometry optimization steps, defaults to 5000
:type opt_steps: int, optional
+    :param patience: A conformer is dropped if its force does not decrease for `patience` consecutive steps; defaults to None, in which case patience is set to opt_steps
+    :type patience: int, optional
+    :param batchsize_atoms: Number of atoms in each optimization batch, defaults to 4096. Roughly 1024 * GPU memory size (in GB) is recommended
+    :type batchsize_atoms: int, optional
"""
ev2hatree = 1/hartree2ev
#create output path that is in the same directory as the input file
@@ -45,8 +50,12 @@ def opt_geometry(path: str, model_name:str, gpu_idx=0, opt_tol=0.003, opt_steps=
else:
device = torch.device("cpu")

opt_config = {"opt_steps": opt_steps, "opttol": opt_tol,
"patience": opt_steps, "batchsize_atoms": 1024}
if patience:
opt_config = {"opt_steps": opt_steps, "opttol": opt_tol,
"patience": patience, "batchsize_atoms": batchsize_atoms}
else:
opt_config = {"opt_steps": opt_steps, "opttol": opt_tol,
"patience": opt_steps, "batchsize_atoms": batchsize_atoms}
opt_engine = optimizing(path, outpath, model_name, device, opt_config)
opt_engine.run()

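For reference, a minimal usage sketch of the updated interface is below. The input path and the parameter values are hypothetical, and the model name is assumed to be one that Auto3D already accepts (e.g. "AIMNET"); only `patience` and `batchsize_atoms` are new in this change.

```python
from Auto3D.ASE.geometry import opt_geometry

# Hypothetical example: optimize the conformers in an SDF file with the
# two new keyword arguments exposed by this change.
opt_geometry(
    "molecules.sdf",            # hypothetical path to an input SDF file
    model_name="AIMNET",        # assumed model name supported by Auto3D
    gpu_idx=0,
    opt_tol=0.003,
    opt_steps=5000,
    patience=300,               # drop a conformer if its force has not decreased for 300 steps
    batchsize_atoms=1024 * 8,   # ~1024 * GPU memory in GB, per the docstring recommendation
)
# Per the function body, the optimized structures are written to an output
# file in the same directory as the input file.
```

Leaving `patience=None` keeps the previous behavior, since the fallback branch sets `"patience": opt_steps` as before.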